author    Daniel Baumann <daniel.baumann@progress-linux.org> 2021-02-07 11:49:00 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2021-02-07 12:42:05 +0000
commit    2e85f9325a797977eea9dfea0a925775ddd211d9 (patch)
tree      452c7f30d62fca5755f659b99e4e53c7b03afc21 /collectors
parent    Releasing debian version 1.19.0-4. (diff)
Merging upstream version 1.29.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors')
-rw-r--r-- collectors/COLLECTORS.md 533
-rw-r--r-- collectors/Makefile.am 1
-rw-r--r-- collectors/Makefile.in 731
-rw-r--r-- collectors/QUICKSTART.md 125
-rw-r--r-- collectors/README.md 170
-rw-r--r-- collectors/REFERENCE.md 186
-rw-r--r-- collectors/all.h 8
-rw-r--r-- collectors/apps.plugin/Makefile.in 576
-rw-r--r-- collectors/apps.plugin/README.md 80
-rw-r--r-- collectors/apps.plugin/apps_groups.conf 29
-rw-r--r-- collectors/apps.plugin/apps_plugin.c 40
-rw-r--r-- collectors/cgroups.plugin/Makefile.in 618
-rw-r--r-- collectors/cgroups.plugin/README.md 17
-rw-r--r-- collectors/cgroups.plugin/cgroup-name.sh 218
-rwxr-xr-x collectors/cgroups.plugin/cgroup-name.sh.in 538
-rw-r--r-- collectors/cgroups.plugin/cgroup-network.c 6
-rw-r--r-- collectors/cgroups.plugin/sys_fs_cgroup.c 874
-rw-r--r-- collectors/cgroups.plugin/sys_fs_cgroup.h 2
-rw-r--r-- collectors/cgroups.plugin/tests/test_cgroups_plugin.c 110
-rw-r--r-- collectors/cgroups.plugin/tests/test_cgroups_plugin.h 16
-rw-r--r-- collectors/cgroups.plugin/tests/test_doubles.c 162
-rw-r--r-- collectors/charts.d.plugin/.keep 0
-rw-r--r-- collectors/charts.d.plugin/Makefile.am 14
-rw-r--r-- collectors/charts.d.plugin/Makefile.in 1012
-rw-r--r-- collectors/charts.d.plugin/README.md 15
-rw-r--r-- collectors/charts.d.plugin/ap/README.md 19
-rw-r--r-- collectors/charts.d.plugin/ap/ap.chart.sh 60
-rw-r--r-- collectors/charts.d.plugin/apache/README.md 129
-rw-r--r-- collectors/charts.d.plugin/apache/apache.chart.sh 251
-rw-r--r-- collectors/charts.d.plugin/apache/apache.conf 30
-rw-r--r-- collectors/charts.d.plugin/apcupsd/README.md 20
-rw-r--r-- collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh 136
-rw-r--r-- collectors/charts.d.plugin/charts.d.conf 18
-rw-r--r-- collectors/charts.d.plugin/charts.d.plugin 698
-rwxr-xr-x collectors/charts.d.plugin/charts.d.plugin.in 54
-rw-r--r-- collectors/charts.d.plugin/cpu_apps/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/cpu_apps/README.md 6
-rw-r--r-- collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh 70
-rw-r--r-- collectors/charts.d.plugin/cpu_apps/cpu_apps.conf 19
-rw-r--r-- collectors/charts.d.plugin/cpufreq/README.md 6
-rw-r--r-- collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh 88
-rw-r--r-- collectors/charts.d.plugin/cpufreq/cpufreq.conf 24
-rw-r--r-- collectors/charts.d.plugin/example/README.md 5
-rw-r--r-- collectors/charts.d.plugin/example/example.chart.sh 106
-rw-r--r-- collectors/charts.d.plugin/exim/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/exim/README.md 6
-rw-r--r-- collectors/charts.d.plugin/exim/exim.chart.sh 46
-rw-r--r-- collectors/charts.d.plugin/exim/exim.conf 24
-rw-r--r-- collectors/charts.d.plugin/hddtemp/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/hddtemp/README.md 30
-rw-r--r-- collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh 77
-rw-r--r-- collectors/charts.d.plugin/hddtemp/hddtemp.conf 23
-rw-r--r-- collectors/charts.d.plugin/libreswan/README.md 20
-rw-r--r-- collectors/charts.d.plugin/libreswan/libreswan.chart.sh 161
-rw-r--r-- collectors/charts.d.plugin/load_average/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/load_average/README.md 6
-rw-r--r-- collectors/charts.d.plugin/load_average/load_average.chart.sh 69
-rw-r--r-- collectors/charts.d.plugin/load_average/load_average.conf 22
-rw-r--r-- collectors/charts.d.plugin/loopsleepms.sh.inc 18
-rw-r--r-- collectors/charts.d.plugin/mem_apps/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/mem_apps/README.md 6
-rw-r--r-- collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh 62
-rw-r--r-- collectors/charts.d.plugin/mem_apps/mem_apps.conf 19
-rw-r--r-- collectors/charts.d.plugin/mysql/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/mysql/README.md 88
-rw-r--r-- collectors/charts.d.plugin/mysql/mysql.chart.sh 511
-rw-r--r-- collectors/charts.d.plugin/mysql/mysql.conf 23
-rw-r--r-- collectors/charts.d.plugin/nginx/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/nginx/README.md 6
-rw-r--r-- collectors/charts.d.plugin/nginx/nginx.chart.sh 141
-rw-r--r-- collectors/charts.d.plugin/nginx/nginx.conf 23
-rw-r--r-- collectors/charts.d.plugin/nut/README.md 22
-rw-r--r-- collectors/charts.d.plugin/nut/nut.chart.sh 120
-rw-r--r-- collectors/charts.d.plugin/opensips/README.md 18
-rw-r--r-- collectors/charts.d.plugin/opensips/opensips.chart.sh 222
-rw-r--r-- collectors/charts.d.plugin/phpfpm/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/phpfpm/README.md 6
-rw-r--r-- collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh 169
-rw-r--r-- collectors/charts.d.plugin/phpfpm/phpfpm.conf 27
-rw-r--r-- collectors/charts.d.plugin/postfix/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/postfix/README.md 28
-rw-r--r-- collectors/charts.d.plugin/postfix/postfix.chart.sh 87
-rw-r--r-- collectors/charts.d.plugin/postfix/postfix.conf 25
-rw-r--r-- collectors/charts.d.plugin/sensors/README.md 21
-rw-r--r-- collectors/charts.d.plugin/squid/README.md 67
-rw-r--r-- collectors/charts.d.plugin/squid/squid.chart.sh 141
-rw-r--r-- collectors/charts.d.plugin/squid/squid.conf 26
-rw-r--r-- collectors/charts.d.plugin/tomcat/Makefile.inc 13
-rw-r--r-- collectors/charts.d.plugin/tomcat/README.md 6
-rw-r--r-- collectors/charts.d.plugin/tomcat/tomcat.chart.sh 152
-rw-r--r-- collectors/charts.d.plugin/tomcat/tomcat.conf 38
-rw-r--r-- collectors/checks.plugin/Makefile.in 519
-rw-r--r-- collectors/checks.plugin/README.md 5
-rw-r--r-- collectors/cups.plugin/Makefile.in 519
-rw-r--r-- collectors/cups.plugin/README.md 7
-rw-r--r-- collectors/diskspace.plugin/Makefile.in 519
-rw-r--r-- collectors/diskspace.plugin/README.md 10
-rw-r--r-- collectors/diskspace.plugin/plugin_diskspace.c 6
-rw-r--r-- collectors/ebpf.plugin/Makefile.am 25
-rw-r--r-- collectors/ebpf.plugin/README.md 400
-rw-r--r-- collectors/ebpf.plugin/ebpf.c 1953
-rw-r--r-- collectors/ebpf.plugin/ebpf.conf 45
-rw-r--r-- collectors/ebpf.plugin/ebpf.h 194
-rw-r--r-- collectors/ebpf.plugin/ebpf_apps.c 1086
-rw-r--r-- collectors/ebpf.plugin/ebpf_apps.h 431
-rw-r--r-- collectors/ebpf.plugin/ebpf_kernel_reject_list.txt 1
-rw-r--r-- collectors/ebpf.plugin/ebpf_process.c 1071
-rw-r--r-- collectors/ebpf.plugin/ebpf_process.h 138
-rw-r--r-- collectors/ebpf.plugin/ebpf_socket.c 1938
-rw-r--r-- collectors/ebpf.plugin/ebpf_socket.h 276
-rw-r--r-- collectors/ebpf.plugin/reset_netdata_trace.sh.in 9
-rw-r--r-- collectors/fping.plugin/Makefile.in 646
-rw-r--r-- collectors/fping.plugin/README.md 7
-rw-r--r-- collectors/fping.plugin/fping.plugin 200
-rw-r--r-- collectors/freebsd.plugin/Makefile.in 519
-rw-r--r-- collectors/freebsd.plugin/README.md 5
-rw-r--r-- collectors/freebsd.plugin/freebsd_getifaddrs.c 2
-rw-r--r-- collectors/freebsd.plugin/freebsd_ipfw.c 8
-rw-r--r-- collectors/freebsd.plugin/freebsd_sysctl.c 2
-rw-r--r-- collectors/freebsd.plugin/plugin_freebsd.c 2
-rw-r--r-- collectors/freeipmi.plugin/Makefile.in 519
-rw-r--r-- collectors/freeipmi.plugin/README.md 16
-rw-r--r-- collectors/freeipmi.plugin/freeipmi_plugin.c 6
-rw-r--r-- collectors/idlejitter.plugin/Makefile.in 519
-rw-r--r-- collectors/idlejitter.plugin/README.md 5
-rw-r--r-- collectors/idlejitter.plugin/plugin_idlejitter.c 4
-rw-r--r-- collectors/ioping.plugin/Makefile.in 646
-rw-r--r-- collectors/ioping.plugin/README.md 7
-rw-r--r-- collectors/ioping.plugin/ioping.plugin 212
-rw-r--r-- collectors/macos.plugin/Makefile.in 519
-rw-r--r-- collectors/macos.plugin/README.md 7
-rw-r--r-- collectors/macos.plugin/macos_fw.c 30
-rw-r--r-- collectors/macos.plugin/macos_mach_smi.c 8
-rw-r--r-- collectors/macos.plugin/macos_sysctl.c 62
-rw-r--r-- collectors/nfacct.plugin/Makefile.in 519
-rw-r--r-- collectors/nfacct.plugin/README.md 5
-rw-r--r-- collectors/nfacct.plugin/plugin_nfacct.c 26
-rw-r--r-- collectors/node.d.plugin/.keep 0
-rw-r--r-- collectors/node.d.plugin/Makefile.am 1
-rw-r--r-- collectors/node.d.plugin/Makefile.in 865
-rw-r--r-- collectors/node.d.plugin/README.md 12
-rw-r--r-- collectors/node.d.plugin/fronius/README.md 10
-rw-r--r-- collectors/node.d.plugin/named/README.md 10
-rw-r--r-- collectors/node.d.plugin/node.d.plugin 303
-rw-r--r-- collectors/node.d.plugin/node_modules/net-snmp.js 4411
-rw-r--r-- collectors/node.d.plugin/sma_webbox/README.md 8
-rw-r--r-- collectors/node.d.plugin/snmp/README.md 353
-rw-r--r-- collectors/node.d.plugin/snmp/snmp.node.js 199
-rw-r--r-- collectors/node.d.plugin/stiebeleltron/README.md 14
-rw-r--r-- collectors/perf.plugin/Makefile.in 519
-rw-r--r-- collectors/perf.plugin/README.md 7
-rw-r--r-- collectors/perf.plugin/perf_plugin.c 2
-rw-r--r-- collectors/plugins.d/Makefile.in 702
-rw-r--r-- collectors/plugins.d/README.md 46
-rw-r--r-- collectors/plugins.d/plugins_d.c 712
-rw-r--r-- collectors/plugins.d/plugins_d.h 12
-rw-r--r-- collectors/plugins.d/pluginsd_parser.c 738
-rw-r--r-- collectors/plugins.d/pluginsd_parser.h 40
-rw-r--r-- collectors/proc.plugin/Makefile.in 519
-rw-r--r-- collectors/proc.plugin/README.md 121
-rw-r--r-- collectors/proc.plugin/plugin_proc.c 7
-rw-r--r-- collectors/proc.plugin/plugin_proc.h 7
-rw-r--r-- collectors/proc.plugin/proc_loadavg.c 7
-rw-r--r-- collectors/proc.plugin/proc_mdstat.c 1288
-rw-r--r-- collectors/proc.plugin/proc_meminfo.c 9
-rw-r--r-- collectors/proc.plugin/proc_net_dev.c 162
-rw-r--r-- collectors/proc.plugin/proc_net_softnet_stat.c 4
-rw-r--r-- collectors/proc.plugin/proc_net_wireless.c 453
-rw-r--r-- collectors/proc.plugin/proc_pressure.c 178
-rw-r--r-- collectors/proc.plugin/proc_pressure.h 31
-rw-r--r-- collectors/proc.plugin/proc_stat.c 1
-rw-r--r-- collectors/proc.plugin/sys_class_infiniband.c 704
-rw-r--r-- collectors/python.d.plugin/.keep 0
-rw-r--r-- collectors/python.d.plugin/Makefile.am 6
-rw-r--r-- collectors/python.d.plugin/Makefile.in 2092
-rw-r--r-- collectors/python.d.plugin/README.md 36
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/README.md 48
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py 2
-rw-r--r-- collectors/python.d.plugin/alarms/Makefile.inc (renamed from collectors/charts.d.plugin/apache/Makefile.inc) 6
-rw-r--r-- collectors/python.d.plugin/alarms/README.md 58
-rw-r--r-- collectors/python.d.plugin/alarms/alarms.chart.py 71
-rw-r--r-- collectors/python.d.plugin/alarms/alarms.conf 50
-rw-r--r-- collectors/python.d.plugin/am2320/README.md 31
-rw-r--r-- collectors/python.d.plugin/am2320/am2320.chart.py 9
-rw-r--r-- collectors/python.d.plugin/am2320/am2320.conf 2
-rw-r--r-- collectors/python.d.plugin/anomalies/Makefile.inc (renamed from collectors/charts.d.plugin/cpufreq/Makefile.inc) 6
-rw-r--r-- collectors/python.d.plugin/anomalies/README.md 231
-rw-r--r-- collectors/python.d.plugin/anomalies/anomalies.chart.py 349
-rw-r--r-- collectors/python.d.plugin/anomalies/anomalies.conf 181
-rw-r--r-- collectors/python.d.plugin/apache/README.md 24
-rw-r--r-- collectors/python.d.plugin/apache/apache.chart.py 3
-rw-r--r-- collectors/python.d.plugin/beanstalk/README.md 22
-rw-r--r-- collectors/python.d.plugin/beanstalk/beanstalk.chart.py 4
-rw-r--r-- collectors/python.d.plugin/bind_rndc/README.md 22
-rw-r--r-- collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py 4
-rw-r--r-- collectors/python.d.plugin/boinc/README.md 25
-rw-r--r-- collectors/python.d.plugin/boinc/boinc.chart.py 2
-rw-r--r-- collectors/python.d.plugin/ceph/README.md 20
-rw-r--r-- collectors/python.d.plugin/ceph/ceph.chart.py 56
-rw-r--r-- collectors/python.d.plugin/ceph/ceph.conf 4
-rw-r--r-- collectors/python.d.plugin/chrony/README.md 37
-rw-r--r-- collectors/python.d.plugin/couchdb/README.md 18
-rw-r--r-- collectors/python.d.plugin/couchdb/couchdb.chart.py 24
-rw-r--r-- collectors/python.d.plugin/dns_query_time/README.md 20
-rw-r--r-- collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py 2
-rw-r--r-- collectors/python.d.plugin/dnsdist/README.md 18
-rw-r--r-- collectors/python.d.plugin/dnsdist/dnsdist.chart.py 4
-rw-r--r-- collectors/python.d.plugin/dockerd/README.md 20
-rw-r--r-- collectors/python.d.plugin/dockerd/dockerd.chart.py 5
-rw-r--r-- collectors/python.d.plugin/dovecot/README.md 24
-rw-r--r-- collectors/python.d.plugin/dovecot/dovecot.chart.py 5
-rw-r--r-- collectors/python.d.plugin/elasticsearch/README.md 22
-rw-r--r-- collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py 8
-rw-r--r-- collectors/python.d.plugin/energid/README.md 22
-rw-r--r-- collectors/python.d.plugin/energid/energid.chart.py 19
-rw-r--r-- collectors/python.d.plugin/example/README.md 7
-rw-r--r-- collectors/python.d.plugin/example/example.chart.py 1
-rw-r--r-- collectors/python.d.plugin/exim/README.md 27
-rw-r--r-- collectors/python.d.plugin/exim/exim.chart.py 1
-rw-r--r-- collectors/python.d.plugin/fail2ban/README.md 22
-rw-r--r-- collectors/python.d.plugin/fail2ban/fail2ban.chart.py 20
-rw-r--r-- collectors/python.d.plugin/freeradius/README.md 18
-rw-r--r-- collectors/python.d.plugin/freeradius/freeradius.chart.py 2
-rw-r--r-- collectors/python.d.plugin/gearman/README.md 24
-rw-r--r-- collectors/python.d.plugin/gearman/gearman.chart.py 26
-rw-r--r-- collectors/python.d.plugin/go_expvar/README.md 58
-rw-r--r-- collectors/python.d.plugin/go_expvar/go_expvar.chart.py 3
-rw-r--r-- collectors/python.d.plugin/haproxy/README.md 20
-rw-r--r-- collectors/python.d.plugin/haproxy/haproxy.chart.py 7
-rw-r--r-- collectors/python.d.plugin/hddtemp/README.md 20
-rw-r--r-- collectors/python.d.plugin/hddtemp/hddtemp.chart.py 4
-rw-r--r-- collectors/python.d.plugin/hpssa/Makefile.inc (renamed from collectors/charts.d.plugin/squid/Makefile.inc) 6
-rw-r--r-- collectors/python.d.plugin/hpssa/README.md 61
-rw-r--r-- collectors/python.d.plugin/hpssa/hpssa.chart.py 395
-rw-r--r-- collectors/python.d.plugin/hpssa/hpssa.conf (renamed from collectors/python.d.plugin/unbound/unbound.conf) 42
-rw-r--r-- collectors/python.d.plugin/httpcheck/README.md 22
-rw-r--r-- collectors/python.d.plugin/httpcheck/httpcheck.chart.py 3
-rw-r--r-- collectors/python.d.plugin/httpcheck/httpcheck.conf 3
-rw-r--r-- collectors/python.d.plugin/icecast/README.md 22
-rw-r--r-- collectors/python.d.plugin/icecast/icecast.chart.py 1
-rw-r--r-- collectors/python.d.plugin/ipfs/README.md 47
-rw-r--r-- collectors/python.d.plugin/ipfs/ipfs.chart.py 37
-rw-r--r-- collectors/python.d.plugin/ipfs/ipfs.conf 11
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/README.md 32
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py 66
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf 7
-rw-r--r-- collectors/python.d.plugin/litespeed/README.md 20
-rw-r--r-- collectors/python.d.plugin/litespeed/litespeed.chart.py 24
-rw-r--r-- collectors/python.d.plugin/logind/README.md 37
-rw-r--r-- collectors/python.d.plugin/megacli/README.md 47
-rw-r--r-- collectors/python.d.plugin/megacli/megacli.chart.py 5
-rw-r--r-- collectors/python.d.plugin/memcached/README.md 21
-rw-r--r-- collectors/python.d.plugin/memcached/memcached.chart.py 1
-rw-r--r-- collectors/python.d.plugin/mongodb/README.md 24
-rw-r--r-- collectors/python.d.plugin/mongodb/mongodb.chart.py 2
-rw-r--r-- collectors/python.d.plugin/monit/README.md 18
-rw-r--r-- collectors/python.d.plugin/monit/monit.chart.py 28
-rw-r--r-- collectors/python.d.plugin/mysql/README.md 32
-rw-r--r-- collectors/python.d.plugin/mysql/mysql.chart.py 3
-rw-r--r-- collectors/python.d.plugin/nginx/README.md 24
-rw-r--r-- collectors/python.d.plugin/nginx/nginx.chart.py 1
-rw-r--r-- collectors/python.d.plugin/nginx_plus/README.md 25
-rw-r--r-- collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py 1
-rw-r--r-- collectors/python.d.plugin/nsd/README.md 12
-rw-r--r-- collectors/python.d.plugin/nsd/nsd.chart.py 1
-rw-r--r-- collectors/python.d.plugin/ntpd/README.md 22
-rw-r--r-- collectors/python.d.plugin/ntpd/ntpd.chart.py 3
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/README.md 58
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py 175
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf 4
-rw-r--r-- collectors/python.d.plugin/openldap/README.md 20
-rw-r--r-- collectors/python.d.plugin/openldap/openldap.chart.py 25
-rw-r--r-- collectors/python.d.plugin/openldap/openldap.conf 15
-rw-r--r-- collectors/python.d.plugin/oracledb/README.md 31
-rw-r--r-- collectors/python.d.plugin/oracledb/oracledb.chart.py 121
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/README.md 22
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py 3
-rw-r--r-- collectors/python.d.plugin/phpfpm/README.md 54
-rw-r--r-- collectors/python.d.plugin/phpfpm/phpfpm.chart.py 10
-rw-r--r-- collectors/python.d.plugin/portcheck/README.md 20
-rw-r--r-- collectors/python.d.plugin/portcheck/portcheck.chart.py 1
-rw-r--r-- collectors/python.d.plugin/postfix/README.md 12
-rw-r--r-- collectors/python.d.plugin/postgres/README.md 49
-rw-r--r-- collectors/python.d.plugin/postgres/postgres.chart.py 105
-rw-r--r-- collectors/python.d.plugin/postgres/postgres.conf 9
-rw-r--r-- collectors/python.d.plugin/powerdns/README.md 20
-rw-r--r-- collectors/python.d.plugin/powerdns/powerdns.chart.py 1
-rw-r--r-- collectors/python.d.plugin/proxysql/README.md 33
-rw-r--r-- collectors/python.d.plugin/proxysql/proxysql.chart.py 5
-rw-r--r-- collectors/python.d.plugin/puppet/README.md 18
-rw-r--r-- collectors/python.d.plugin/puppet/puppet.chart.py 8
-rw-r--r-- collectors/python.d.plugin/python.d.conf 3
-rw-r--r-- collectors/python.d.plugin/python.d.plugin 784
-rw-r--r-- collectors/python.d.plugin/python.d.plugin.in 86
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py 8
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py 6
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py 9
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py 2
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py 29
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/charts.py 17
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/collection.py 37
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/loggers.py 14
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/filelock.py 451
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/monotonic.py 42
-rw-r--r-- collectors/python.d.plugin/rabbitmq/README.md 46
-rw-r--r-- collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py 152
-rw-r--r-- collectors/python.d.plugin/rabbitmq/rabbitmq.conf 6
-rw-r--r-- collectors/python.d.plugin/redis/README.md 20
-rw-r--r-- collectors/python.d.plugin/redis/redis.chart.py 46
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/README.md 20
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py 1
-rw-r--r-- collectors/python.d.plugin/retroshare/README.md 46
-rw-r--r-- collectors/python.d.plugin/retroshare/retroshare.chart.py 1
-rw-r--r-- collectors/python.d.plugin/riakkv/README.md 24
-rw-r--r-- collectors/python.d.plugin/riakkv/riakkv.chart.py 55
-rw-r--r-- collectors/python.d.plugin/samba/README.md 26
-rw-r--r-- collectors/python.d.plugin/samba/samba.chart.py 33
-rw-r--r-- collectors/python.d.plugin/sensors/README.md 20
-rw-r--r-- collectors/python.d.plugin/sensors/sensors.chart.py 6
-rw-r--r-- collectors/python.d.plugin/smartd_log/README.md 22
-rw-r--r-- collectors/python.d.plugin/smartd_log/smartd_log.chart.py 19
-rw-r--r-- collectors/python.d.plugin/spigotmc/README.md 20
-rw-r--r-- collectors/python.d.plugin/spigotmc/spigotmc.chart.py 17
-rw-r--r-- collectors/python.d.plugin/springboot/README.md 24
-rw-r--r-- collectors/python.d.plugin/springboot/springboot.chart.py 6
-rw-r--r-- collectors/python.d.plugin/springboot/springboot.conf 8
-rw-r--r-- collectors/python.d.plugin/squid/README.md 20
-rw-r--r-- collectors/python.d.plugin/squid/squid.chart.py 1
-rw-r--r-- collectors/python.d.plugin/tomcat/README.md 20
-rw-r--r-- collectors/python.d.plugin/tomcat/tomcat.chart.py 2
-rw-r--r-- collectors/python.d.plugin/tor/README.md 24
-rw-r--r-- collectors/python.d.plugin/tor/tor.chart.py 3
-rw-r--r-- collectors/python.d.plugin/traefik/README.md 20
-rw-r--r-- collectors/python.d.plugin/traefik/traefik.chart.py 2
-rw-r--r-- collectors/python.d.plugin/unbound/Makefile.inc 13
-rw-r--r-- collectors/python.d.plugin/unbound/README.md 114
-rw-r--r-- collectors/python.d.plugin/unbound/unbound.chart.py 318
-rw-r--r-- collectors/python.d.plugin/uwsgi/README.md 21
-rw-r--r-- collectors/python.d.plugin/uwsgi/uwsgi.chart.py 2
-rw-r--r-- collectors/python.d.plugin/varnish/README.md 102
-rw-r--r-- collectors/python.d.plugin/varnish/varnish.chart.py 145
-rw-r--r-- collectors/python.d.plugin/w1sensor/README.md 21
-rw-r--r-- collectors/python.d.plugin/w1sensor/w1sensor.chart.py 2
-rw-r--r-- collectors/python.d.plugin/web_log/README.md 36
-rw-r--r-- collectors/python.d.plugin/web_log/web_log.chart.py 13
-rw-r--r-- collectors/slabinfo.plugin/Makefile.in 543
-rw-r--r-- collectors/slabinfo.plugin/README.md 5
-rw-r--r-- collectors/slabinfo.plugin/slabinfo.c 18
-rw-r--r-- collectors/statsd.plugin/.keep 0
-rw-r--r-- collectors/statsd.plugin/Makefile.am 1
-rw-r--r-- collectors/statsd.plugin/Makefile.in 614
-rw-r--r-- collectors/statsd.plugin/README.md 30
-rw-r--r-- collectors/statsd.plugin/statsd.c 9
-rw-r--r-- collectors/tc.plugin/Makefile.in 617
-rw-r--r-- collectors/tc.plugin/README.md 13
-rw-r--r-- collectors/tc.plugin/plugin_tc.c 4
-rw-r--r-- collectors/tc.plugin/tc-qos-helper.sh 297
-rwxr-xr-x collectors/tc.plugin/tc-qos-helper.sh.in 10
-rw-r--r-- collectors/xenstat.plugin/Makefile.in 519
-rw-r--r-- collectors/xenstat.plugin/README.md 5
-rw-r--r-- collectors/xenstat.plugin/xenstat_plugin.c 124
361 files changed, 22217 insertions, 25988 deletions
diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md
new file mode 100644
index 000000000..e718fd239
--- /dev/null
+++ b/collectors/COLLECTORS.md
@@ -0,0 +1,533 @@
+<!--
+title: "Supported collectors list"
+description: "Netdata gathers real-time metrics from hundreds of data sources using collectors. Most require zero configuration and are pre-configured out of the box."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/COLLECTORS.md
+-->
+
+# Supported collectors list
+
+Netdata uses collectors to help you gather metrics from your favorite applications and services and view them in
+real-time, interactive charts. The following list includes collectors for both external services/applications and
+internal system metrics.
+
+Learn more about [how collectors work](/docs/collect/how-collectors-work.md), and then learn how to [enable or
+configure](/docs/collect/enable-configure.md) any of the below collectors using the same process.
+
+Some collectors have both Go and Python versions as we continue our effort to migrate all collectors to Go. In these
+cases, _Netdata always prioritizes the Go version_, and we highly recommend you use the Go versions for the best
+experience.
+
+If you want to use a Python version of a collector, you need to explicitly [disable the Go
+version](/docs/collect/enable-configure.md) and enable the Python version. Netdata then skips the Go version and
+attempts to load the Python version and its accompanying configuration file.
+
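+As a concrete sketch of that switch (assuming the stock Netdata config layout, with the hypothetical module
+name `nginx` standing in for whichever collector you want to change), you can use the agent's `edit-config`
+helper:
+
+```bash
+cd /etc/netdata                   # or your Netdata config directory
+sudo ./edit-config go.d.conf      # under "modules:", set:  nginx: no
+sudo ./edit-config python.d.conf  # set:  nginx: yes
+sudo systemctl restart netdata    # restart the agent so the change takes effect
+```
+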
+If you don't see the app/service you'd like to monitor in this list, check out our [GitHub
+issues](https://github.com/netdata/netdata/issues). Use the search bar to look for previous discussions about that
+collector—we may be looking for contributions from users such as yourself! If you don't see the collector there, make a
+[feature request](https://community.netdata.cloud/c/feature-requests/7/none) on our community forums.
+
+- [Service and application collectors](#service-and-application-collectors)
+  - [Generic](#generic)
+  - [APM (application performance monitoring)](#apm-application-performance-monitoring)
+ - [Containers and VMs](#containers-and-vms)
+ - [Data stores](#data-stores)
+ - [Distributed computing](#distributed-computing)
+ - [Email](#email)
+ - [Kubernetes](#kubernetes)
+ - [Logs](#logs)
+ - [Messaging](#messaging)
+ - [Network](#network)
+ - [Provisioning](#provisioning)
+ - [Remote devices](#remote-devices)
+ - [Search](#search)
+ - [Storage](#storage)
+ - [Web](#web)
+- [System collectors](#system-collectors)
+ - [Applications](#applications)
+ - [Disks and filesystems](#disks-and-filesystems)
+ - [eBPF (extended Berkeley Packet Filter)](#ebpf)
+ - [Hardware](#hardware)
+ - [Memory](#memory)
+  - [Networks](#networks)
+  - [Operating systems](#operating-systems)
+ - [Processes](#processes)
+ - [Resources](#resources)
+ - [Users](#users)
+- [Netdata collectors](#netdata-collectors)
+- [Orchestrators](#orchestrators)
+- [Third-party collectors](#third-party-collectors)
+- [Etc](#etc)
+
+## Service and application collectors
+
+The Netdata Agent auto-detects and collects metrics from all of the services and applications below. You can also
+configure any of these collectors according to your setup and infrastructure.
+
+### Generic
+
+- [Prometheus endpoints](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus): Gathers
+  metrics from any number of Prometheus endpoints, and can autodetect more than 600 services and applications.
+
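+As a minimal sketch of wiring up a Prometheus endpoint (the job name and URL below are hypothetical
+placeholders for your own target):
+
+```bash
+# Create a user override for the go.d prometheus module, then restart the agent.
+sudo tee /etc/netdata/go.d/prometheus.conf >/dev/null <<'EOF'
+jobs:
+  - name: my_app                        # hypothetical job name
+    url: http://127.0.0.1:8080/metrics  # any endpoint serving Prometheus-format metrics
+EOF
+sudo systemctl restart netdata
+```
+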
+### APM (application performance monitoring)
+
+- [Go applications](/collectors/python.d.plugin/go_expvar/README.md): Monitor any Go application that exposes its
+ metrics with the `expvar` package from the Go standard library.
+- [Java Spring Boot 2
+ applications](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/springboot2/) (Go version):
+ Monitor running Java Spring Boot 2 applications that expose their metrics with the use of the Spring Boot Actuator.
+- [Java Spring Boot 2 applications](/collectors/python.d.plugin/springboot/README.md) (Python version): Monitor
+ running Java Spring Boot applications that expose their metrics with the use of the Spring Boot Actuator.
+- [statsd](/collectors/statsd.plugin/README.md): Implement a high-performance `statsd` server for Netdata (see the
+  example after this list).
+- [phpDaemon](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpdaemon/): Collect worker
+ statistics (total, active, idle), and uptime for web and network applications.
+- [uWSGI](/collectors/python.d.plugin/uwsgi/README.md): Monitor performance metrics exposed by the uWSGI Stats
+ Server.
+
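+Because the `statsd` collector mentioned above speaks the standard statsd wire protocol (UDP port 8125 by
+default), you can feed it from a shell; the metric name here is a hypothetical example:
+
+```bash
+# Send one counter increment for "myapp.requests" to the local Netdata statsd listener.
+echo "myapp.requests:1|c" | nc -u -w1 localhost 8125
+```
+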
+### Containers and VMs
+
+- [Docker containers](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual Docker
+ containers using the cgroups collector plugin.
+- [DockerD](/collectors/python.d.plugin/dockerd/README.md): Collect container health statistics.
+- [Docker Engine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/docker_engine/): Collect
+ runtime statistics from the `docker` daemon using the `metrics-address` feature.
+- [Docker Hub](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dockerhub/): Collect statistics
+ about Docker repositories, such as pulls, starts, status, time since last update, and more.
+- [Libvirt](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual Libvirt containers
+ using the cgroups collector plugin.
+- [LXC](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual LXC containers using
+ the cgroups collector plugin.
+- [LXD](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual LXD containers using
+ the cgroups collector plugin.
+- [systemd-nspawn](/collectors/cgroups.plugin/README.md): Monitor the health and performance of individual
+ systemd-nspawn containers using the cgroups collector plugin.
+- [vCenter Server Appliance](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vcsa/): Monitor
+ appliance system, components, and software update health statuses via the Health API.
+- [vSphere](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vsphere/): Collect host and virtual
+ machine performance metrics.
+- [Xen/XCP-ng](/collectors/xenstat.plugin/README.md): Collect XenServer and XCP-ng metrics using `libxenstat`.
+
+### Data stores
+
+- [CockroachDB](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/): Monitor various
+ database components using `_status/vars` endpoint.
+- [Consul](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/consul/): Capture service and unbound
+ checks status (passing, warning, critical, maintenance).
+- [Couchbase](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/couchbase/): Gather per-bucket
+ metrics from any number of instances of the distributed JSON document database.
+- [CouchDB](/collectors/python.d.plugin/couchdb/README.md): Monitor database health and performance metrics
+ (reads/writes, HTTP traffic, replication status, etc).
+- [MongoDB](/collectors/python.d.plugin/mongodb/README.md): Collect database performance metrics by reading the
+  server's response to the `stats` command (stats interface).
+- [MySQL](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql/): Collect database global,
+ replication and per user statistics.
+- [OracleDB](/collectors/python.d.plugin/oracledb/README.md): Monitor database performance and health metrics.
+- [Pika](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pika/): Gather metrics, such as clients,
+ memory usage, queries, and more from the Redis interface-compatible database.
+- [Postgres](/collectors/python.d.plugin/postgres/README.md): Collect database health and performance metrics.
+- [ProxySQL](/collectors/python.d.plugin/proxysql/README.md): Monitor database backend and frontend performance
+ metrics.
+- [Redis (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/redis/): Monitor status from any
+ number of database instances by reading the server's response to the `INFO ALL` command.
+- [Redis (Python)](/collectors/python.d.plugin/redis/): Monitor database status by reading the server's response to
+ the `INFO` command.
+- [RethinkDB](/collectors/python.d.plugin/rethinkdbs/README.md): Collect database server and cluster statistics.
+- [Riak KV](/collectors/python.d.plugin/riakkv/README.md): Collect database stats from the `/stats` endpoint.
+- [Zookeeper](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/zookeeper/): Monitor application
+  health metrics by reading the server's response to the `mntr` command.
+
+### Distributed computing
+
+- [BOINC](/collectors/python.d.plugin/boinc/README.md): Monitor the total number of tasks, open tasks, and task
+ states for the distributed computing client.
+- [Gearman](/collectors/python.d.plugin/gearman/README.md): Collect application summary (queued, running) and per-job
+ worker statistics (queued, idle, running).
+
+### Email
+
+- [Dovecot](/collectors/python.d.plugin/dovecot/README.md): Collect email server performance metrics by reading the
+ server's response to the `EXPORT global` command.
+- [EXIM](/collectors/python.d.plugin/exim/README.md): Uses the `exim` tool to monitor the queue length of a
+ mail/message transfer agent (MTA).
+- [Postfix](/collectors/python.d.plugin/postfix/README.md): Uses the `postqueue` tool to monitor the queue length of a
+ mail/message transfer agent (MTA).
+
+### Kubernetes
+
+- [Kubelet](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet/): Monitor one or more
+  instances of the Kubelet agent and collect metrics on the number of pods/containers running, volume of Docker
+ operations, and more.
+- [kube-proxy](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy/): Collect
+ metrics, such as syncing proxy rules and REST client requests, from one or more instances of `kube-proxy`.
+- [Service discovery](https://github.com/netdata/agent-service-discovery/): Find what services are running on a
+  cluster's pods, convert that into configuration files, and export them so they can be monitored by Netdata.
+
+### Logs
+
+- [Fluentd](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/fluentd/): Gather application
+  plugin metrics from an endpoint provided by `in_monitor plugin`.
+- [Logstash](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/logstash/): Monitor JVM threads,
+ memory usage, garbage collection statistics, and more.
+- [OpenVPN status logs](/collectors/python.d.plugin/ovpn_status_log/): Parse server log files and provide summary
+ (client, traffic) metrics.
+- [Squid web server logs](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/squidlog/): Tail Squid
+ access logs to return the volume of requests, types of requests, bandwidth, and much more.
+- [Web server logs (Go version for Apache,
+ NGINX)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/): Tail access logs and provide
+ very detailed web server performance statistics. This module is able to parse 200k+ rows in less than half a second.
+- [Web server logs (Python version for Apache, NGINX, Squid)](/collectors/python.d.plugin/web_log/): Tail access log
+ file and collect web server/caching proxy metrics.
+
+### Messaging
+
+- [ActiveMQ](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/activemq/): Collect message broker
+ queues and topics statistics using the ActiveMQ Console API.
+- [Beanstalk](/collectors/python.d.plugin/beanstalk/README.md): Collect server and tube-level statistics, such as CPU
+  usage, job rates, commands, and more.
+- [Pulsar](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pulsar/): Collect summary,
+ namespaces, and topics performance statistics.
+- [RabbitMQ (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/rabbitmq/): Collect message
+ broker overview, system and per virtual host metrics.
+- [RabbitMQ (Python)](/collectors/python.d.plugin/rabbitmq/README.md): Collect message broker global and per virtual
+ host metrics.
+- [VerneMQ](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/vernemq/): Monitor MQTT broker
+  health and performance metrics. It collects all available info for both MQTTv3 and v5 communication.
+
+### Network
+
+- [Bind 9](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/bind/): Collect nameserver summary
+ performance statistics via a web interface (`statistics-channels` feature).
+- [Chrony](/collectors/python.d.plugin/chrony/README.md): Monitor the precision and statistics of a local `chronyd`
+ server.
+- [CoreDNS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/coredns/): Measure DNS query round
+ trip time.
+- [Dnsmasq](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsmasq_dhcp/): Automatically
+  detects all configured `Dnsmasq` DHCP ranges and monitors their utilization.
+- [DNSdist (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsdist/): Collect
+ load-balancer performance and health metrics.
+- [DNSdist (Python)](/collectors/python.d.plugin/dnsdist/README.md): Collect load-balancer performance and health
+ metrics.
+- [Dnsmasq DNS Forwarder](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsmasq/): Gather
+ queries, entries, operations, and events for the lightweight DNS forwarder.
+- [dns_query](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/dnsquery/): Monitor the round
+ trip time for DNS queries in milliseconds.
+- [DNS Query Time](/collectors/python.d.plugin/dns_query_time/README.md): Measure DNS query round trip time.
+- [Freeradius (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/freeradius/): Collect
+ server authentication and accounting statistics from the `status server`.
+- [Freeradius (Python)](/collectors/python.d.plugin/freeradius/README.md): Collect server authentication and
+ accounting statistics from the `status server` using the `radclient` tool.
+- [Libreswan](/collectors/charts.d.plugin/libreswan/): Collect bytes-in, bytes-out, and uptime metrics.
+- [Icecast](/collectors/python.d.plugin/icecast/README.md): Monitor the number of listeners for active sources.
+- [ISC BIND](/collectors/node.d.plugin/named/README.md): Collect nameserver summary performance statistics via a web
+ interface (`statistics-channels` feature).
+- [ISC Bind (RDNC)](/collectors/python.d.plugin/bind_rndc/README.md): Collect nameserver summary performance
+ statistics using the `rndc` tool.
+- [ISC DHCP (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/isc_dhcpd): Reads a
+ `dhcpd.leases` file and collects metrics on total active leases, pool active leases, and pool utilization.
+- [ISC DHCP (Python)](/collectors/python.d.plugin/isc_dhcpd/README.md): Reads `dhcpd.leases` file and reports DHCP
+ pools utilization and leases statistics (total number, leases per pool).
+- [OpenLDAP](/collectors/python.d.plugin/openldap/README.md): Provides statistics from the OpenLDAP
+ (`slapd`) server.
+- [NSD](/collectors/python.d.plugin/nsd/README.md): Monitor nameserver performance metrics using the `nsd-control`
+ tool.
+- [NTP daemon](/collectors/python.d.plugin/ntpd/README.md): Monitor the system variables of the local `ntpd` daemon
+ (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket.
+- [OpenSIPS](/collectors/charts.d.plugin/opensips/README.md): Collect server health and performance metrics using the
+ `opensipsctl` tool.
+- [OpenVPN](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/openvpn/): Gather server summary
+  (client, traffic) and per-user (traffic, connection time) metrics using the `management-interface`.
+- [Pi-hole](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pihole/): Monitor basic (DNS
+ queries, clients, blocklist) and extended (top clients, top permitted, and blocked domains) statistics using the PHP
+ API.
+- [PowerDNS Authoritative Server
+ (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/powerdns): Monitor one or more instances
+ of the nameserver software to collect questions, events, and latency metrics.
+- [PowerDNS Recursor (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/powerdns_recursor):
+ Gather incoming/outgoing questions, drops, timeouts, and cache usage from any number of DNS recursor instances.
+- [PowerDNS (Python)](/collectors/python.d.plugin/powerdns/README.md): Monitor authoritative server and recursor
+ statistics.
+- [RetroShare](/collectors/python.d.plugin/retroshare/README.md): Monitor application bandwidth, peers, and DHT
+ metrics.
+- [Tor](/collectors/python.d.plugin/tor/README.md): Capture traffic usage statistics using the Tor control port.
+- [Unbound](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/unbound/): Collect DNS resolver
+ summary and extended system and per thread metrics via the `remote-control` interface.
+
+### Provisioning
+
+- [Puppet](/collectors/python.d.plugin/puppet/README.md): Monitor the status of Puppet Server and Puppet DB.
+
+### Remote devices
+
+- [AM2320](/collectors/python.d.plugin/am2320/README.md): Monitor sensor temperature and humidity.
+- [Access point](/collectors/charts.d.plugin/ap/README.md): Monitor client, traffic, and signal metrics using the `iw`
+ tool.
+- [APC UPS](/collectors/charts.d.plugin/apcupsd/README.md): Capture status information using the `apcaccess` tool.
+- [Energi Core](/collectors/python.d.plugin/energid/README.md): Monitor blockchain, memory, network, and unspent
+ transactions statistics.
+- [Fronius Symo](/collectors/node.d.plugin/fronius/): Collect power, consumption, autonomy, energy, and inverter
+ statistics.
+- [UPS/PDU](/collectors/charts.d.plugin/nut/README.md): Read the status of UPS/PDU devices using the `upsc` tool.
+- [SMA Sunny WebBox](/collectors/node.d.plugin/sma_webbox/README.md): Collect power statistics.
+- [SNMP devices](/collectors/node.d.plugin/snmp/README.md): Gather data using the SNMP protocol.
+- [Stiebel Eltron ISG](/collectors/node.d.plugin/stiebeleltron/README.md): Collect metrics from heat pump and hot
+ water installations.
+- [1-Wire sensors](/collectors/python.d.plugin/w1sensor/README.md): Monitor sensor temperature.
+
+### Search
+
+- [Elasticsearch (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/elasticsearch): Collect
+ dozens of metrics on search engine performance from local nodes and local indices. Includes cluster health and
+ statistics.
+- [Elasticsearch (Python)](/collectors/python.d.plugin/elasticsearch/README.md): Collect search engine performance and
+ health statistics. Optionally collects per-index metrics.
+- [Solr](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/solr/): Collect application search
+ requests, search errors, update requests, and update errors statistics.
+
+### Storage
+
+- [Ceph](/collectors/python.d.plugin/ceph/README.md): Monitor the Ceph cluster usage and server data consumption.
+- [HDFS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/hdfs/): Monitor health and performance
+ metrics for filesystem datanodes and namenodes.
+- [IPFS](/collectors/python.d.plugin/ipfs/README.md): Collect file system bandwidth, peers, and repo metrics.
+- [Scaleio](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/scaleio/): Monitor storage system,
+ storage pools, and SDCS health and performance metrics via VxFlex OS Gateway API.
+- [Samba](/collectors/python.d.plugin/samba/README.md): Collect file sharing metrics using the `smbstatus` tool.
+
+### Web
+
+- [Apache (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache/): Collect Apache web
+ server performance metrics via the `server-status?auto` endpoint.
+- [Apache (Python)](/collectors/python.d.plugin/apache/README.md): Collect Apache web server performance metrics via
+ the `server-status?auto` endpoint.
+- [HAProxy](/collectors/python.d.plugin/haproxy/README.md): Collect frontend, backend, and health metrics.
+- [HTTP endpoints (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/httpcheck/): Monitor
+  any HTTP endpoint's availability and response time (see the configuration sketch after this list).
+- [HTTP endpoints (Python)](/collectors/python.d.plugin/httpcheck/README.md): Monitor any HTTP endpoint's
+ availability and response time.
+- [Lighttpd](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/lighttpd/): Collect web server
+ performance metrics using the `server-status?auto` endpoint.
+- [Lighttpd2](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/lighttpd2/): Collect web server
+ performance metrics using the `server-status?format=plain` endpoint.
+- [Litespeed](/collectors/python.d.plugin/litespeed/README.md): Collect web server data (network, connection,
+ requests, cache) by reading `.rtreport*` files.
+- [Nginx (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx/): Monitor web server
+ status information by gathering metrics via `ngx_http_stub_status_module`.
+- [Nginx (Python)](/collectors/python.d.plugin/nginx/README.md): Monitor web server status information by gathering
+ metrics via `ngx_http_stub_status_module`.
+- [Nginx Plus](/collectors/python.d.plugin/nginx_plus/README.md): Collect global and per-server zone, upstream, and
+ cache metrics.
+- [PHP-FPM (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm/): Collect application
+ summary and processes health metrics by scraping the status page (`/status?full`).
+- [PHP-FPM (Python)](/collectors/python.d.plugin/phpfpm/README.md): Collect application summary and processes health
+ metrics by scraping the status page (`/status?full`).
+- [TCP endpoints (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/portcheck/): Monitor any
+ TCP endpoint's availability and response time.
+- [TCP endpoints (Python)](/collectors/python.d.plugin/portcheck/README.md): Monitor any TCP endpoint's availability
+ and response time.
+- [Spigot Minecraft servers](/collectors/python.d.plugin/spigotmc/README.md): Monitor average tick rate and number
+ of users.
+- [Squid](/collectors/python.d.plugin/squid/README.md): Monitor client and server bandwidth/requests by gathering
+ data from the Cache Manager component.
+- [Tengine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/tengine/): Monitor web server
+ statistics using information provided by `ngx_http_reqstat_module`.
+- [Tomcat](/collectors/python.d.plugin/tomcat/README.md): Collect web server performance metrics from the Manager App
+ (`/manager/status?XML=true`).
+- [Traefik](/collectors/python.d.plugin/traefik/README.md): Uses Traefik's Health API to provide statistics.
+- [Varnish](/collectors/python.d.plugin/varnish/README.md): Provides HTTP accelerator global, backends (VBE), and
+ disks (SMF) statistics using the `varnishstat` tool.
+- [x509 check](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/x509check/): Monitor certificate
+ expiration time.
+- [Whois domain expiry](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/whoisquery/): Checks the
+  remaining time until a given domain expires.
+
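+As a hedged sketch of how the HTTP and TCP endpoint checks above are typically configured (the job name and
+URL are hypothetical placeholders):
+
+```bash
+# Create a user override for the go.d httpcheck module, then restart the agent.
+sudo tee /etc/netdata/go.d/httpcheck.conf >/dev/null <<'EOF'
+jobs:
+  - name: my_site             # hypothetical job name
+    url: https://example.com  # endpoint whose availability and latency you want to track
+EOF
+sudo systemctl restart netdata
+```
+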
+## System collectors
+
+The Netdata Agent can collect these system- and hardware-level metrics using a variety of collectors, some of which
+(such as `proc.plugin`) collect multiple types of metrics simultaneously.
+
+### Applications
+
+- [Fail2ban](/collectors/python.d.plugin/fail2ban/README.md): Parses configuration files to detect all jails, then
+ uses log files to report ban rates and volume of banned IPs.
+- [Monit](/collectors/python.d.plugin/monit/README.md): Monitor statuses of targets (service-checks) using the XML
+ stats interface.
+- [WMI (Windows Management Instrumentation)
+ exporter](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi/): Collect CPU, memory,
+  network, disk, OS, system, and log-in metrics by scraping `wmi_exporter`.
+
+### Disks and filesystems
+
+- [BCACHE](/collectors/proc.plugin/README.md): Monitor BCACHE statistics with the `proc.plugin` collector.
+- [Block devices](/collectors/proc.plugin/README.md): Gather metrics about the health and performance of block
+  devices using the `proc.plugin` collector.
+- [Btrfs](/collectors/proc.plugin/README.md): Monitors Btrfs filesystems with the `proc.plugin` collector.
+- [Device mapper](/collectors/proc.plugin/README.md): Gather metrics about the Linux device mapper with the proc
+ collector.
+- [Disk space](/collectors/diskspace.plugin/README.md): Collect disk space usage metrics on Linux mount points.
+- [Files and directories](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/filecheck): Gather
+ metrics about the existence, modification time, and size of files or directories.
+- [ioping.plugin](/collectors/ioping.plugin/README.md): Measure disk read/write latency.
+- [NFS file servers and clients](/collectors/proc.plugin/README.md): Gather operations, utilization, and space usage
+  using the `proc.plugin` collector.
+- [RAID arrays](/collectors/proc.plugin/README.md): Collect health, disk status, operation status, and more with the
+  `proc.plugin` collector.
+- [Veritas Volume Manager](/collectors/proc.plugin/README.md): Gather metrics about the Veritas Volume Manager (VVM).
+- [ZFS](/collectors/proc.plugin/README.md): Monitor bandwidth and utilization of ZFS disks/partitions using the proc
+ collector.
+
+### eBPF
+
+- [Files](/collectors/ebpf.plugin/README.md): Provides information about how often a system calls kernel
+ functions related to file descriptors using the eBPF collector.
+- [Virtual file system (VFS)](/collectors/ebpf.plugin/README.md): Monitor IO, errors, deleted objects, and
+ more for kernel virtual file systems (VFS) using the eBPF collector.
+- [Processes](/collectors/ebpf.plugin/README.md): Monitor threads, task exits, and errors using the eBPF collector.
+
+### Hardware
+
+- [Adaptec RAID](/collectors/python.d.plugin/adaptec_raid/README.md): Monitor logical and physical devices health
+ metrics using the `arcconf` tool.
+- [CUPS](/collectors/cups.plugin/README.md): Monitor CUPS.
+- [FreeIPMI](/collectors/freeipmi.plugin/README.md): Uses `libipmimonitoring-dev` or `libipmimonitoring-devel` to
+ monitor the number of sensors, temperatures, voltages, currents, and more.
+- [Hard drive temperature](/collectors/python.d.plugin/hddtemp/README.md): Monitor the temperature of storage
+ devices.
+- [HP Smart Storage Arrays](/collectors/python.d.plugin/hpssa/README.md): Monitor controller, cache module, logical
+ and physical drive state, and temperature using the `ssacli` tool.
+- [MegaRAID controllers](/collectors/python.d.plugin/megacli/README.md): Collect adapter, physical drives, and
+ battery stats using the `megacli` tool.
+- [NVIDIA GPU](/collectors/python.d.plugin/nvidia_smi/README.md): Monitor performance metrics (memory usage, fan
+ speed, pcie bandwidth utilization, temperature, and more) using the `nvidia-smi` tool.
+- [Sensors](/collectors/python.d.plugin/sensors/README.md): Reads system sensors information (temperature, voltage,
+ electric current, power, and more) from `/sys/devices/`.
+- [S.M.A.R.T](/collectors/python.d.plugin/smartd_log/README.md): Reads SMART Disk Monitoring daemon logs.
+
+### Memory
+
+- [Available memory](/collectors/proc.plugin/README.md): Tracks changes in available RAM using the `proc.plugin`
+ collector.
+- [Committed memory](/collectors/proc.plugin/README.md): Monitor committed memory using the `proc.plugin` collector.
+- [Huge pages](/collectors/proc.plugin/README.md): Gather metrics about huge pages in Linux and FreeBSD with the
+ `proc.plugin` collector.
+- [KSM](/collectors/proc.plugin/README.md): Measure the amount of merging, savings, and effectiveness using the
+ `proc.plugin` collector.
+- [Memcached](/collectors/python.d.plugin/memcached/README.md): Collect memory-caching system performance metrics.
+- [Numa](/collectors/proc.plugin/README.md): Gather metrics on the number of non-uniform memory access (NUMA) events
+ every second using the `proc.plugin` collector.
+- [Page faults](/collectors/proc.plugin/README.md): Collect the number of memory page faults per second using the
+ `proc.plugin` collector.
+- [RAM](/collectors/proc.plugin/README.md): Collect metrics on system RAM, available RAM, and more using the
+ `proc.plugin` collector.
+- [SLAB](/collectors/slabinfo.plugin/README.md): Collect kernel SLAB details on Linux systems.
+- [swap](/collectors/proc.plugin/README.md): Monitor the amount of free and used swap every second using the
+ `proc.plugin` collector.
+- [Writeback memory](/collectors/proc.plugin/README.md): Collect how much memory is actively being written to disk
+ every second using the `proc.plugin` collector.
+
+### Networks
+
+- [Access points](/collectors/charts.d.plugin/ap/README.md): Visualizes data related to access points.
+- [fping.plugin](fping.plugin/README.md): Measure network latency, jitter, and packet loss between the monitored node
+  and any number of remote network endpoints (see the configuration sketch after this list).
+- [Netfilter](/collectors/nfacct.plugin/README.md): Collect netfilter firewall, connection tracker, and accounting
+ metrics using `libmnl` and `libnetfilter_acct`.
+- [Network stack](/collectors/proc.plugin/README.md): Monitor the networking stack for errors, TCP connection aborts,
+ bandwidth, and more.
+- [Network QoS](/collectors/tc.plugin/README.md): Collect traffic QoS metrics (`tc`) of Linux network interfaces.
+- [SYNPROXY](/collectors/proc.plugin/README.md): Monitor entries used, SYN packets received, TCP cookies, and more.
+
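+A minimal sketch of pointing `fping.plugin` at targets, assuming the stock config path (the hosts listed are
+hypothetical):
+
+```bash
+# Tell the fping plugin which hosts to ping; it charts latency, jitter, and packet loss per host.
+sudo tee /etc/netdata/fping.conf >/dev/null <<'EOF'
+hosts="10.0.0.1 example.com"
+EOF
+sudo systemctl restart netdata
+```
+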
+### Operating systems
+
+- [freebsd.plugin](freebsd.plugin/README.md): Collect resource usage and performance data on FreeBSD systems.
+- [macOS](/collectors/macos.plugin/README.md): Collect resource usage and performance data on macOS systems.
+
+### Processes
+
+- [Applications](/collectors/apps.plugin/README.md): Gather CPU, disk, memory, network, eBPF, and other metrics per
+ application using the `apps.plugin` collector.
+- [systemd](/collectors/cgroups.plugin/README.md): Monitor the CPU and memory usage of systemd services using the
+ `cgroups.plugin` collector.
+- [systemd unit states](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/systemdunits): See the
+ state (active, inactive, activating, deactivating, failed) of various systemd unit types.
+- [System processes](/collectors/proc.plugin/README.md): Collect metrics on system load and total processes running
+ using `/proc/loadavg` and the `proc.plugin` collector.
+- [Uptime](/collectors/proc.plugin/README.md): Monitor the uptime of a system using the `proc.plugin` collector.
+
+### Resources
+
+- [CPU frequency](/collectors/proc.plugin/README.md): Monitor CPU frequency, as set by the `cpufreq` kernel module,
+ using the `proc.plugin` collector.
+- [CPU idle](/collectors/proc.plugin/README.md): Measure CPU idle every second using the `proc.plugin` collector.
+- [CPU performance](/collectors/perf.plugin/README.md): Collect CPU performance metrics using performance monitoring
+ units (PMU).
+- [CPU throttling](/collectors/proc.plugin/README.md): Gather metrics about thermal throttling using the `/proc/stat`
+ module and the `proc.plugin` collector.
+- [CPU utilization](/collectors/proc.plugin/README.md): Capture CPU utilization, both system-wide and per-core, using
+ the `/proc/stat` module and the `proc.plugin` collector.
+- [Entropy](/collectors/proc.plugin/README.md): Monitor the available entropy on a system using the `proc.plugin`
+ collector.
+- [Interprocess Communication (IPC)](/collectors/proc.plugin/README.md): Monitor IPC semaphores and shared memory
+ using the `proc.plugin` collector.
+- [Interrupts](/collectors/proc.plugin/README.md): Monitor interrupts per second using the `proc.plugin` collector.
+- [IdleJitter](/collectors/idlejitter.plugin/README.md): Measure CPU latency and jitter on all operating systems.
+- [SoftIRQs](/collectors/proc.plugin/README.md): Collect metrics on SoftIRQs, both system-wide and per-core, using the
+ `proc.plugin` collector.
+- [SoftNet](/collectors/proc.plugin/README.md): Capture SoftNet events per second, both system-wide and per-core,
+ using the `proc.plugin` collector.
+
+### Users
+
+- [systemd-logind](/collectors/python.d.plugin/logind/README.md): Monitor active sessions, users, and seats tracked
+ by `systemd-logind` or `elogind`.
+- [User/group usage](/collectors/apps.plugin/README.md): Gather CPU, disk, memory, network, and other metrics per user
+ and user group using the `apps.plugin` collector.
+
+## Netdata collectors
+
+These collectors are recursive in nature, in that they monitor some function of the Netdata Agent itself. Some
+collectors are described only in code and associated charts in Netdata dashboards.
+
+- [ACLK (code only)](https://github.com/netdata/netdata/blob/master/aclk/aclk_stats.c): View whether a Netdata Agent
+ is connected to Netdata Cloud via the [ACLK](/aclk/README.md), the volume of queries, process times, and more.
+- [Alarms](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/alarms): This collector creates an
+  **Alarms** menu with one line plot showing the alarm states of a Netdata Agent over time.
+- [Anomalies](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/anomalies): This collector uses the
+ Python PyOD library to perform unsupervised anomaly detection on your Netdata charts and/or dimensions.
+- [Exporting (code only)](https://github.com/netdata/netdata/blob/master/exporting/send_internal_metrics.c): Gather
+ metrics on CPU utilization for the [exporting engine](/exporting/README.md), and specific metrics for each enabled
+ exporting connector.
+- [Global statistics (code only)](https://github.com/netdata/netdata/blob/master/daemon/global_statistics.c): See
+ metrics on the CPU utilization, network traffic, volume of web clients, API responses, database engine usage, and
+ more.
+
+## Orchestrators
+
+Plugin orchestrators organize and run many of the above collectors.
+
+If you're interested in developing a new collector that you'd like to contribute to Netdata, we highly recommend using
+the `go.d.plugin`.
+
+- [go.d.plugin](https://github.com/netdata/go.d.plugin): An orchestrator for data collection modules written in `go`.
+- [python.d.plugin](python.d.plugin/README.md): An orchestrator for data collection modules written in `python` v2/v3.
+- [charts.d.plugin](charts.d.plugin/README.md): An orchestrator for data collection modules written in `bash` v4+.
+- [node.d.plugin](node.d.plugin/README.md): An orchestrator for data collection modules written in `node.js`.
+
+## Third-party collectors
+
+These collectors are developed and maintained by third parties and, unlike the other collectors, are not installed by
+default. To use a third-party collector, visit their GitHub/documentation page and follow their installation procedures.
+
+- [CyberPower UPS](https://github.com/HawtDogFlvrWtr/netdata_cyberpwrups_plugin): Polls CyberPower UPS data using
+ PowerPanel® Personal Linux.
+- [Logged-in users](https://github.com/veksh/netdata-numsessions): Collect the number of currently logged-on users.
+- [nim-netdata-plugin](https://github.com/FedericoCeratto/nim-netdata-plugin): A helper to create native Netdata
+ plugins using Nim.
+- [Nvidia GPUs](https://github.com/coraxx/netdata_nv_plugin): Monitor Nvidia GPUs.
+- [Teamspeak 3](https://github.com/coraxx/netdata_ts3_plugin): Polls active users and bandwidth from TeamSpeak 3
+ servers.
+- [SSH](https://github.com/Yaser-Amiri/netdata-ssh-module): Monitor failed authentication requests of an SSH server.
+
+## Etc
+
+- [checks.plugin](checks.plugin/README.md): A debugging collector, disabled by default.
+- [charts.d example](charts.d.plugin/example/README.md): An example `charts.d` collector.
+- [python.d example](python.d.plugin/example/README.md): An example `python.d` collector.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FCOLLECTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
index 743102570..460612c68 100644
--- a/collectors/Makefile.am
+++ b/collectors/Makefile.am
@@ -24,6 +24,7 @@ SUBDIRS = \
python.d.plugin \
slabinfo.plugin \
statsd.plugin \
+ ebpf.plugin \
tc.plugin \
$(NULL)
diff --git a/collectors/Makefile.in b/collectors/Makefile.in
deleted file mode 100644
index a6370673e..000000000
--- a/collectors/Makefile.in
+++ /dev/null
@@ -1,731 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
- ctags-recursive dvi-recursive html-recursive info-recursive \
- install-data-recursive install-dvi-recursive \
- install-exec-recursive install-html-recursive \
- install-info-recursive install-pdf-recursive \
- install-ps-recursive install-recursive installcheck-recursive \
- installdirs-recursive pdf-recursive ps-recursive \
- tags-recursive uninstall-recursive
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
- distclean-recursive maintainer-clean-recursive
-am__recursive_targets = \
- $(RECURSIVE_TARGETS) \
- $(RECURSIVE_CLEAN_TARGETS) \
- $(am__extra_recursive_targets)
-AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
- distdir
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-# Read a list of newline-separated strings from the standard input,
-# and print each of them once, without duplicates. Input order is
-# *not* preserved.
-am__uniquify_input = $(AWK) '\
- BEGIN { nonempty = 0; } \
- { items[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in items) print i; }; } \
-'
-# Make sure the list of sources is unique. This is necessary because,
-# e.g., the same source file might be shared among _SOURCES variables
-# for different programs/libraries.
-am__define_uniq_tagged_files = \
- list='$(am__tagged_files)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | $(am__uniquify_input)`
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-am__relativize = \
- dir0=`pwd`; \
- sed_first='s,^\([^/]*\)/.*$$,\1,'; \
- sed_rest='s,^[^/]*/*,,'; \
- sed_last='s,^.*/\([^/]*\)$$,\1,'; \
- sed_butlast='s,/*[^/]*$$,,'; \
- while test -n "$$dir1"; do \
- first=`echo "$$dir1" | sed -e "$$sed_first"`; \
- if test "$$first" != "."; then \
- if test "$$first" = ".."; then \
- dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
- dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
- else \
- first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
- if test "$$first2" = "$$first"; then \
- dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
- else \
- dir2="../$$dir2"; \
- fi; \
- dir0="$$dir0"/"$$first"; \
- fi; \
- fi; \
- dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
- done; \
- reldir="$$dir2"
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-SUBDIRS = \
- plugins.d \
- apps.plugin \
- cgroups.plugin \
- charts.d.plugin \
- checks.plugin \
- cups.plugin \
- diskspace.plugin \
- fping.plugin \
- ioping.plugin \
- freebsd.plugin \
- freeipmi.plugin \
- idlejitter.plugin \
- macos.plugin \
- nfacct.plugin \
- xenstat.plugin \
- perf.plugin \
- node.d.plugin \
- proc.plugin \
- python.d.plugin \
- slabinfo.plugin \
- statsd.plugin \
- tc.plugin \
- $(NULL)
-
-usercustompluginsconfigdir = $(configdir)/custom-plugins.d
-usergoconfigdir = $(configdir)/go.d
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run 'make' without going through this Makefile.
-# To change the values of 'make' variables: instead of editing Makefiles,
-# (1) if the variable is set in 'config.status', edit 'config.status'
-# (which will cause the Makefiles to be regenerated when you run 'make');
-# (2) otherwise, pass the desired values on the 'make' command line.
-$(am__recursive_targets):
- @fail=; \
- if $(am__make_keepgoing); then \
- failcom='fail=yes'; \
- else \
- failcom='exit 1'; \
- fi; \
- dot_seen=no; \
- target=`echo $@ | sed s/-recursive//`; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- for subdir in $$list; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- dot_seen=yes; \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done; \
- if test "$$dot_seen" = "no"; then \
- $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
- fi; test -z "$$fail"
-
-ID: $(am__tagged_files)
- $(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-recursive
-TAGS: tags
-
-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- set x; \
- here=`pwd`; \
- if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
- include_option=--etags-include; \
- empty_fix=.; \
- else \
- include_option=--include; \
- empty_fix=; \
- fi; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- test ! -f $$subdir/TAGS || \
- set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
- fi; \
- done; \
- $(am__define_uniq_tagged_files); \
- shift; \
- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- if test $$# -gt 0; then \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- "$$@" $$unique; \
- else \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$unique; \
- fi; \
- fi
-ctags: ctags-recursive
-
-CTAGS: ctags
-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- $(am__define_uniq_tagged_files); \
- test -z "$(CTAGS_ARGS)$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && $(am__cd) $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) "$$here"
-cscopelist: cscopelist-recursive
-
-cscopelist-am: $(am__tagged_files)
- list='$(am__tagged_files)'; \
- case "$(srcdir)" in \
- [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
- *) sdir=$(subdir)/$(srcdir) ;; \
- esac; \
- for i in $$list; do \
- if test -f "$$i"; then \
- echo "$(subdir)/$$i"; \
- else \
- echo "$$sdir/$$i"; \
- fi; \
- done >> $(top_builddir)/cscope.files
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- $(am__make_dryrun) \
- || test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
- $(am__relativize); \
- new_distdir=$$reldir; \
- dir1=$$subdir; dir2="$(top_distdir)"; \
- $(am__relativize); \
- new_top_distdir=$$reldir; \
- echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
- echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
- ($(am__cd) $$subdir && \
- $(MAKE) $(AM_MAKEFLAGS) \
- top_distdir="$$new_top_distdir" \
- distdir="$$new_distdir" \
- am__remove_distdir=: \
- am__skip_length_check=: \
- am__skip_mode_fix=: \
- distdir) \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-recursive
-all-am: Makefile $(DATA)
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-recursive
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-recursive
- -rm -f Makefile
-distclean-am: clean-am distclean-generic distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-html-am:
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-recursive
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-recursive
-
-install-html-am:
-
-install-info: install-info-recursive
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-recursive
-
-install-pdf-am:
-
-install-ps: install-ps-recursive
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: $(am__recursive_targets) install-am install-strip
-
-.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
- check-am clean clean-generic cscopelist-am ctags ctags-am \
- distclean distclean-generic distclean-tags distdir dvi dvi-am \
- html html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-exec-local install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- installdirs-am maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \
- tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(usercustompluginsconfigdir)
- $(INSTALL) -d $(DESTDIR)$(usergoconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/QUICKSTART.md b/collectors/QUICKSTART.md
new file mode 100644
index 000000000..a691ffc4c
--- /dev/null
+++ b/collectors/QUICKSTART.md
@@ -0,0 +1,125 @@
+<!--
+title: "Collectors quickstart"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/QUICKSTART.md
+-->
+
+# Collectors quickstart
+
+In this quickstart guide, you'll learn how to enable collectors so you can get metrics from your favorite applications
+and services.
+
+This guide will not cover advanced collector features, such as enabling/disabling entire plugins; see the [collectors
+configuration reference](REFERENCE.md) for those details.
+
+## What's in this quickstart guide
+
+- [Find the collector for your application or service](#find-the-collector-for-your-application-or-service)
+- [Configure your application or service for monitoring](#configure-your-application-or-service-for-monitoring)
+- [Edit the collector's configuration file](#edit-the-collectors-configuration-file)
+- [Enable the collector](#enable-the-collector)
+
+## Find the collector for your application or service
+
+Netdata has _pre-installed_ collectors for hundreds of popular applications and services. You don't need to install
+anything to collect metrics from many popular services, like Nginx web servers, MySQL/MariaDB databases, and much more.
+
+To find whether Netdata has a pre-installed collector for your favorite app/service, check out our [collector support
+list](COLLECTORS.md). The only exception is the [third-party collectors](COLLECTORS.md#third-party-collectors), which
+you do need to install yourself. However, this quickstart guide will focus on pre-installed collectors.
+
+When you find a collector you're interested in, take note of its orchestrator, named in the heading above each table.
+There are four: Bash, Go, Node, and Python, which go by their respective names: `charts.d`, `go.d`, `node.d`, and
+`python.d`.
+
+> If there is a collector written in both Go and Python, it's better to choose the Go-based version, as we will
+> eventually deprecate most Python-based collectors.
+
+From here on out, this quickstart guide will use the [Nginx
+collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) as an example to showcase the
+process of configuring and enabling one of Netdata's pre-installed collectors.
+
+## Configure your application or service for monitoring
+
+Every collector's documentation comes with instructions on how to configure your app/service to make it available to
+Netdata's collector. Our [collector support list](COLLECTORS.md) contains links to each collector's documentation page
+so you can learn more.
+
+For example, the [Nginx collector
+documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) states that your Nginx
+installation must have the `stub_status` module configured correctly, in addition to an active `stub_status` page, for
+Netdata to monitor it. You can confirm whether you have the module enabled with the following command:
+
+```bash
+nginx -V 2>&1 | grep -o with-http_stub_status_module
+```
+
+If this command returns nothing, you'll need to [enable the `stub_status`
+module](https://www.nginx.com/blog/monitoring-nginx/).
+
+Next, edit your `/etc/nginx/sites-enabled/default` file to include a `location` block with the following, which enables
+the `stub_status` page:
+
+```conf
+server {
+ ...
+
+ location /nginx_status {
+ stub_status;
+ }
+}
+```
+
+At this point, your Nginx installation is fully configured and ready for Netdata to monitor it. Next, you'll configure
+your collector.
+
+## Edit the collector's configuration file
+
+This step may not be required, depending on how you configured your app/service, as each collector comes with a few
+pre-configured jobs that look for the app/service in common and expected locations. For example, the Nginx collector
+looks for a `stub_status` page at `http://localhost/stub_status` and `http://127.0.0.1/stub_status`, which allows it to
+auto-detect almost all local Nginx web servers.
+
+Despite Netdata's auto-detection capabilities, it's important to know how to edit collector configuration files.
+
+You should always edit configuration files with the `edit-config` script that comes with every installation of Netdata.
+To edit a collector configuration file, navigate to your [Netdata configuration directory](/docs/configure/nodes.md).
+Launch `edit-config` with the path to the collector's configuration file.
+
+How do you find the path to the collector's configuration file? Look under the **Configuration** heading in the
+collector's documentation. Each page contains a short code block with the relevant command.
+
+For example, the [Nginx collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) has its
+configuration file at `go.d/nginx.conf`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/nginx.conf
+```
+
+This file contains all of the possible job parameters to help you monitor Nginx in all sorts of complex deployments. At
+the bottom of the file is a `jobs` section, which contains the two default jobs. Edit these as needed, using the other
+parameters as a reference, to configure the collector.
+
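+Since `go.d` collector configurations are YAML files, a default job should look similar to this minimal sketch (the
+`name` and `url` values here are illustrative; check your own file for the exact defaults):
+
+```yaml
+jobs:
+  # Scrape the local Nginx stub_status page with the default update interval.
+  - name: local
+    url: http://127.0.0.1/stub_status
+```
+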
+## Enable the collector
+
+Most collectors are enabled and will auto-detect their app/service without manual configuration. However, you need to
+restart Netdata to trigger the auto-detection process.
+
+To restart Netdata on most systems, use `service netdata restart`. For other systems, see the [other restart
+methods](/docs/getting-started.md#start-stop-and-restart-netdata).
+
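+For example, on a typical systemd-based distribution (a sketch; the right command depends on your init system):
+
+```bash
+# systemd-based systems
+sudo systemctl restart netdata
+
+# systems with SysV-style init scripts
+sudo service netdata restart
+```
+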
+Open Netdata's dashboard in your browser, or refresh the page if you already have it open. You should now see a new
+entry in the menu and new interactive charts!
+
+## What's next?
+
+Collector not working? Learn about collector troubleshooting in our [collector
+reference](REFERENCE.md#troubleshoot-a-collector).
+
+View our [collectors guides](/collectors/README.md#guides) to get specific instructions on enabling new and
+popular collectors.
+
+Finally, learn more advanced collector features, such as disabling plugins or developing a custom collector, in our
+[internal plugin API](/collectors/REFERENCE.md#internal-plugins-api) or our [external plugin
+docs](/collectors/plugins.d/README.md).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FQUICKSTART&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/README.md b/collectors/README.md
index cc1a1e453..a37a7e890 100644
--- a/collectors/README.md
+++ b/collectors/README.md
@@ -1,122 +1,52 @@
-# Data collection plugins
-
-Netdata supports **internal** and **external** data collection plugins:
-
-- **internal** plugins are written in `C` and run as threads inside the `netdata` daemon.
-
-- **external** plugins may be written in any computer language and are spawn as independent long-running processes by the `netdata` daemon.
- They communicate with the `netdata` daemon via `pipes` (`stdout` communication).
-
-To minimize the number of processes spawn for data collection, Netdata also supports **plugin orchestrators**.
-
-- **plugin orchestrators** are external plugins that do not collect any data by themeselves.
- Instead they support data collection **modules** written in the language of the orchestrator.
- Usually the orchestrator provides a higher level abstraction, making it ideal for writing new
- data collection modules with the minimum of code.
-
- Currently Netdata provides plugin orchestrators
- BASH v4+ [charts.d.plugin](charts.d.plugin/),
- node.js [node.d.plugin](node.d.plugin/) and
- python v2+ (including v3) [python.d.plugin](python.d.plugin/).
-
-## Netdata Plugins
-
-|plugin|lang|O/S|runs as|modular|description|
-|:----:|:--:|:-:|:-----:|:-----:|:----------|
-|[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
-|[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems|
-|[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
-|[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)|
-|[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS**|
-|[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points|
-|[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
-|[ioping.plugin](ioping.plugin/)|`C`|any|external|-|measures disk read/write latency.|
-|[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems|
-|[freeipmi.plugin](freeipmi.plugin/)|`C`|linux, freebsd|external|-|collects metrics from enterprise hardware sensors, on Linux and FreeBSD servers.|
-|[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems|
-|[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems|
-|[nfacct.plugin](nfacct.plugin/)|`C`|linux|external|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`|
-|[xenstat.plugin](xenstat.plugin/)|`C`|linux|external|-|collects XenServer and XCP-ng metrics using `libxenstat`|
-|[perf.plugin](perf.plugin/)|`C`|linux|external|-|collects CPU performance metrics using performance monitoring units (PMU).|
-|[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.|
-|[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins|
-|[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems|
-|[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
-|[slabinfo.plugin](slabinfo.plugin/)|`C`|linux|external|-|collects kernel SLAB details on Linux systems|
-|[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for Netdata|
-|[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces|
-
-## Enabling and Disabling plugins
-
-Each plugin can be enabled or disabled via `netdata.conf`, section `[plugins]`.
-
-At this section there a list of all the plugins with a boolean setting to enable them or disable them.
-
-The exception is `statsd.plugin` that has its own `[statsd]` section.
-
-Once a plugin is enabled, consult the page of each plugin for additional configuration options.
-
-All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
-
-### Internal Plugins
-
-Each of the internal plugins runs as a thread inside the `netdata` daemon.
-Once this thread has started, the plugin may spawn additional threads according to its design.
-
-#### Internal Plugins API
-
-The internal data collection API consists of the following calls:
-
-```c
-collect_data() {
- // collect data here (one iteration)
-
- collected_number collected_value = collect_a_value();
-
- // give the metrics to Netdata
-
- static RRDSET *st = NULL; // the chart
- static RRDDIM *rd = NULL; // a dimension attached to this chart
-
- if(unlikely(!st)) {
- // we haven't created this chart before
- // create it now
- st = rrdset_create_localhost(
- "type"
- , "id"
- , "name"
- , "family"
- , "context"
- , "Chart Title"
- , "units"
- , "plugin-name"
- , "module-name"
- , priority
- , update_every
- , chart_type
- );
-
- // attach a metric to it
- rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
- }
- else {
- // this chart is already created
- // let Netdata know we start a new iteration on it
- rrdset_next(st);
- }
-
- // give the collected value(s) to the chart
- rrddim_set_by_pointer(st, rd, collected_value);
-
- // signal Netdata we are done with this iteration
- rrdset_done(st);
-}
-```
-
-Of course, Netdata has a lot of libraries to help you also in collecting the metrics. The best way to find your way through this, is to examine what other similar plugins do.
-
-### External Plugins
-
-**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+<!--
+title: "Collecting metrics"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/README.md
+-->
+
+# Collecting metrics
+
+Netdata can collect metrics from hundreds of different sources, be they internal data created by the system itself, or
+external data created by services or applications. To see _all_ of the sources Netdata collects from, view our [list
+of supported collectors](/collectors/COLLECTORS.md), then follow our [quickstart guide](/collectors/QUICKSTART.md) to
+get up and running.
+
+There are two essential points to understand about how collecting metrics works in Netdata:
+
+- All collectors are **installed by default** with every installation of Netdata. You do not need to install
+ collectors manually to collect metrics from new sources.
+- Upon startup, Netdata will **auto-detect** any application or service that has a
+ [collector](/collectors/COLLECTORS.md), as long as both the collector and the app/service are configured correctly.
+
+Most users will want to enable a new Netdata collector for their app/service. For those details, see our [quickstart
+guide](/collectors/QUICKSTART.md).
+
+## Take your next steps with collectors
+
+[Collectors quickstart](/collectors/QUICKSTART.md)
+
+[Supported collectors list](/collectors/COLLECTORS.md)
+
+[Collectors configuration reference](/collectors/REFERENCE.md)
+
+## Guides
+
+[Monitor Nginx or Apache web server log files with Netdata](/docs/guides/collect-apache-nginx-web-logs.md)
+
+[Monitor CockroachDB metrics with Netdata](/docs/guides/monitor-cockroachdb.md)
+
+[Monitor Unbound DNS servers with Netdata](/docs/guides/collect-unbound-metrics.md)
+
+[Monitor a Hadoop cluster with Netdata](/docs/guides/monitor-hadoop-cluster.md)
+
+## Related features
+
+**[Dashboards](/web/README.md)**: Visualize your newly-collected metrics in real-time using Netdata's [built-in
+dashboard](/web/gui/README.md).
+
+**[Backends](/backends/README.md)**: Extend our built-in [database engine](/database/engine/README.md), which supports
+long-term metrics storage, by archiving metrics to external databases like Graphite, Prometheus, MongoDB,
+TimescaleDB, and more.
+
+**[Exporting](/exporting/README.md)**: An experimental refactoring of our backends system with a modular system and
+support for exporting metrics to multiple systems simultaneously.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/REFERENCE.md b/collectors/REFERENCE.md
new file mode 100644
index 000000000..9c6f0a61e
--- /dev/null
+++ b/collectors/REFERENCE.md
@@ -0,0 +1,186 @@
+<!--
+title: "Collectors configuration reference"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/REFERENCE.md
+-->
+
+# Collectors configuration reference
+
+Welcome to the collector configuration reference guide.
+
+This guide contains detailed information about enabling/disabling plugins or modules, in addition to a quick reference
+to the internal plugins API.
+
+To learn the basics of collecting metrics from other applications and services, see the [collector
+quickstart](QUICKSTART.md).
+
+## Netdata's collector architecture
+
+Netdata has an intricate system for organizing and managing its collectors. **Collectors** are the processes/programs
+that actually gather metrics from various sources. Collectors are organized into **plugins**, which manage the
+independent processes, written in a variety of programming languages, according to their purpose and performance
+requirements.
+**Modules** are a type of collector, used primarily to connect to external applications, such as an Nginx web server or
+MySQL database, among many others.
+
+For most users, enabling individual collectors for the application/service you're interested in is far more important
+than knowing which plugin it uses. Check our [collectors list](/collectors/COLLECTORS.md) to see whether your favorite
+app/service has a collector, and then read the [collectors quickstart](/collectors/QUICKSTART.md) and the documentation
+for that specific collector to figure out how to enable it.
+
+There are three types of plugins:
+
+- **Internal** plugins organize collectors that gather metrics from `/proc`, `/sys` and other Linux kernel sources.
+ They are written in `C`, and run as threads within the Netdata daemon.
+- **External** plugins organize collectors that gather metrics from external processes, such as a MySQL database or
+ Nginx web server. They can be written in any language, and the `netdata` daemon spawns them as long-running
+ independent processes. They communicate with the daemon via pipes.
+- **Plugin orchestrators** are external plugins that instead support a number of **modules**. Modules are a type of
+  collector. We have a few plugin orchestrators available for those who want to develop their own collectors, but we
+  focus most of our efforts on the [Go plugin](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/).
+
+## Enable, configure, and disable modules
+
+Most collector modules come with **auto-detection**, configured to work out-of-the-box on popular operating systems with
+the default settings.
+
+However, there are cases where auto-detection fails. Usually, the reason is that the application to be monitored does
+not allow Netdata to connect. In most cases, allowing the `netdata` user to connect from `localhost` and collect
+metrics will automatically enable data collection for the application in question (after a Netdata restart).
+
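+For example, for a MySQL database, a sketch of the usual approach is to create a passwordless `netdata` user that can
+only connect locally, with minimal privileges (standard MySQL statements; adapt the user and host to your setup):
+
+```bash
+# Create the netdata user, grant it bare USAGE, and reload privileges.
+sudo mysql -e "CREATE USER 'netdata'@'localhost'; GRANT USAGE ON *.* TO 'netdata'@'localhost'; FLUSH PRIVILEGES;"
+```
+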
+View our [collectors quickstart](/collectors/QUICKSTART.md) for explicit details on enabling and configuring collector modules.
+
+## Troubleshoot a collector
+
+First, navigate to your plugins directory, which is usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case
+on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the plugins directory,
+switch to the `netdata` user.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+```
+
+The next step depends on the collector's orchestrator. You can figure out which orchestrator the collector uses by
+viewing the [collectors list](COLLECTORS.md) and referencing the _configuration file_ field. For example, if that
+field contains `go.d`, that collector uses the Go orchestrator.
+
+```bash
+# Go orchestrator (go.d.plugin)
+./go.d.plugin -d -m <MODULE_NAME>
+
+# Python orchestrator (python.d.plugin)
+./python.d.plugin <MODULE_NAME> debug trace
+
+# Node orchestrator (node.d.plugin)
+./node.d.plugin debug 1 <MODULE_NAME>
+
+# Bash orchestrator (charts.d.plugin)
+./charts.d.plugin debug 1 <MODULE_NAME>
+```
+
+The output from the relevant command will provide valuable troubleshooting information. If you can't figure out how to
+enable the collector using the details from this output, feel free to [create an issue on our
+GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage&template=bug_report.md) to get some
+help from our collectors experts.
+
+## Enable and disable plugins
+
+You can enable or disable individual plugins by opening `netdata.conf` and scrolling down to the `[plugins]` section.
+This section features a list of Netdata's plugins, with a boolean setting to enable or disable them. The exception is
+`statsd.plugin`, which has its own `[statsd]` section. Your `[plugins]` section should look similar to this:
+
+```conf
+[plugins]
+ # PATH environment variable = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/var/lib/snapd/snap/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
+ # PYTHONPATH environment variable =
+ # proc = yes
+ # diskspace = yes
+ # cgroups = yes
+ # tc = yes
+ # idlejitter = yes
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+ # slabinfo = no
+ # fping = yes
+ # ioping = yes
+ # node.d = yes
+ # python.d = yes
+ # go.d = yes
+ # apps = yes
+ # perf = yes
+ # charts.d = yes
+```
+
+By default, most plugins are enabled, so you don't need to enable them explicitly to use their collectors. To enable or
+disable any specific plugin, remove the comment (`#`) and change the boolean setting to `yes` or `no`.
+
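+For instance, to explicitly disable the `fping` plugin while keeping the Go orchestrator enabled, uncomment and edit
+only the relevant lines (a sketch; all other plugins keep their defaults):
+
+```conf
+[plugins]
+    fping = no
+    go.d = yes
+```
+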
+All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
+
+## Internal plugins
+
+Each of the internal plugins runs as a thread inside the `netdata` daemon. Once this thread has started, the plugin may
+spawn additional threads according to its design.
+
+### Internal plugins API
+
+The internal data collection API consists of the following calls:
+
+```c
+collect_data() {
+ // collect data here (one iteration)
+
+ collected_number collected_value = collect_a_value();
+
+ // give the metrics to Netdata
+
+ static RRDSET *st = NULL; // the chart
+ static RRDDIM *rd = NULL; // a dimension attached to this chart
+
+ if(unlikely(!st)) {
+ // we haven't created this chart before
+ // create it now
+ st = rrdset_create_localhost(
+ "type"
+ , "id"
+ , "name"
+ , "family"
+ , "context"
+ , "Chart Title"
+ , "units"
+ , "plugin-name"
+ , "module-name"
+ , priority
+ , update_every
+ , chart_type
+ );
+
+ // attach a metric to it
+ rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
+ }
+ else {
+ // this chart is already created
+ // let Netdata know we start a new iteration on it
+ rrdset_next(st);
+ }
+
+ // give the collected value(s) to the chart
+ rrddim_set_by_pointer(st, rd, collected_value);
+
+ // signal Netdata we are done with this iteration
+ rrdset_done(st);
+}
+```
+
+Of course, Netdata has a lot of libraries to help you collect metrics. The best way to find your way around them is to
+examine what other, similar plugins do.
+
+## External plugins
+
+**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+
+## Write a custom collector
+
+You can add custom collectors by following the [external plugins documentation](../collectors/plugins.d/).
+
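+As a taste of what that documentation covers, here is a minimal sketch of an external collector written in `bash` that
+speaks the `plugins.d` text protocol on its standard output (the chart and dimension names are illustrative):
+
+```bash
+#!/usr/bin/env bash
+# Sketch of an external Netdata plugin. Netdata passes the data
+# collection frequency (in seconds) as the first argument.
+update_every="${1:-1}"
+
+# Define a chart and one dimension, once, at startup.
+echo "CHART example.random '' 'A random number' 'value' random random.number line 90000 ${update_every}"
+echo "DIMENSION random '' absolute 1 1"
+
+# On every iteration, send the collected value for that dimension.
+while true; do
+    echo "BEGIN example.random"
+    echo "SET random = ${RANDOM}"
+    echo "END"
+    sleep "${update_every}"
+done
+```
+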
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREFERENCE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/all.h b/collectors/all.h
index f58f0dcd5..153fce931 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -282,6 +282,8 @@
#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030
#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040
+// Infiniband
+#define NETDATA_CHART_PRIO_INFINIBAND 7100
// Netfilter
@@ -310,11 +312,17 @@
#define NETDATA_CHART_PRIO_MDSTAT_SPEED 9006
// Linux Power Supply
+
#define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply
#define NETDATA_CHART_PRIO_POWER_SUPPLY_CHARGE 9501
#define NETDATA_CHART_PRIO_POWER_SUPPLY_ENERGY 9502
#define NETDATA_CHART_PRIO_POWER_SUPPLY_VOLTAGE 9503
+
+// Wireless
+
+#define NETDATA_CHART_PRIO_WIRELESS_IFACE 7110
+
// CGROUPS
#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in
deleted file mode 100644
index e688b4240..000000000
--- a/collectors/apps.plugin/Makefile.in
+++ /dev/null
@@ -1,576 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/apps.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_libconfig_DATA) \
- $(dist_noinst_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(libconfigdir)"
-DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- apps_groups.conf \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(libconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_libconfigDATA
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
index 1b682bc65..d10af1cdd 100644
--- a/collectors/apps.plugin/README.md
+++ b/collectors/apps.plugin/README.md
@@ -1,3 +1,9 @@
+<!--
+title: "apps.plugin"
+sidebar_label: "Application monitoring (apps.plugin)"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/README.md
+-->
+
# apps.plugin
`apps.plugin` breaks down system resource usage by **processes**, **users** and **user groups**.
@@ -7,7 +13,7 @@ for every process found running.
Since Netdata needs to present this information in charts and track them through time,
instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
-to which it assigns all running processes. This list is [customizable](apps_groups.conf) and Netdata
+to which it assigns all running processes. This list is customizable via `apps_groups.conf`, and Netdata
ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
@@ -15,7 +21,7 @@ processes together (evaluating both child and parent processes) so that the resu
a predefined set of members (of course, only process groups found running are reported).
> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
-> glad to accept pull requests improving the [defaults](apps_groups.conf) shipped with Netdata.
+> glad to accept pull requests improving the defaults shipped with Netdata in `apps_groups.conf`.
Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account for the resource
utilization of exited processes. Their utilization is attributed to their currently running parents.
@@ -32,35 +38,38 @@ that fork/spawn other short lived processes hundreds of times per second.
Each of these sections provides the same number of charts:
-- CPU Utilization
+- CPU utilization (`apps.cpu`)
- Total CPU usage
- - User / System CPU usage
+ - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
- Disk I/O
- - Physical Reads / Writes
- - Logical Reads / Writes
- - Open Unique Files (if a file is found open multiple times, it is counted just once)
+ - Physical reads/writes (`apps.preads`/`apps.pwrites`)
+ - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
+ - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
- Memory
- - Real Memory Used (non shared)
- - Virtual Memory Allocated
- - Minor Page Faults (i.e. memory activity)
+ - Real memory used (non-shared, `apps.mem`)
+ - Virtual memory allocated (`apps.vmem`)
+ - Minor page faults (i.e. memory activity, `apps.minor_faults`)
- Processes
- - Threads Running
- - Processes Running
- - Pipes Open
- - Carried Over Uptime (since the Netdata restart)
- - Minimum Uptime
- - Average Uptime
- - Maximum Uptime
-
-- Swap Memory
- - Swap Memory Used
- - Major Page Faults (i.e. swap activity)
+ - Threads running (`apps.threads`)
+ - Processes running (`apps.processes`)
+ - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
+ - Minimum uptime (`apps.uptime_min`)
+ - Average uptime (`apps.uptime_average`)
+ - Maximum uptime (`apps.uptime_max`)
+ - Pipes open (`apps.pipes`)
+- Swap memory
+ - Swap memory used (`apps.swap`)
+ - Major page faults (i.e. swap activity, `apps.major_faults`)
- Network
- - Sockets Open
+ - Sockets open (`apps.sockets`)
+
+If the [eBPF collector](/collectors/ebpf.plugin/README.md) is running, your dashboard will also show an
+additional [list of charts](/collectors/ebpf.plugin/README.md#integration-with-appsplugin) using low-level Linux
+metrics.
The above are reported:
-- For **Applications** per [target configured](apps_groups.conf).
+- For **Applications** per target configured.
- For **Users** per username or UID (when the username is not available).
- For **User Groups** per groupname or GID (when the groupname is not available).
@@ -90,8 +99,7 @@ its CPU resources will be cut in half, and data collection will be once every 2
## Configuration
-The configuration file is `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)).
-To edit it on your system run `/etc/netdata/edit-config apps_groups.conf`.
+The configuration file is `/etc/netdata/apps_groups.conf`. To edit it on your system, run `/etc/netdata/edit-config apps_groups.conf`.
The configuration file accepts multiple lines, each having this format:
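Each line maps a group name to one or more process-name patterns, as the `apps_groups.conf` hunks later in this diff illustrate. A minimal sketch, using hypothetical group and process names:

```
# group: space-separated process name patterns (wildcards allowed)
myapp: myapp* myworker
```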
@@ -149,6 +157,15 @@ There are a few command line options you can pass to `apps.plugin`. The list of
command options = without-users without-groups
```
+### Integration with eBPF
+
+If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
+[`ebpf.conf`](/collectors/ebpf.plugin/README.md#ebpf-programs) file to ensure the eBPF program is enabled.
+
+Also see our [guide on troubleshooting apps with eBPF
+metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a
+few scenarios.
+
## Permissions
`apps.plugin` requires additional privileges to collect all the information it needs.
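On Linux this is commonly done by granting file capabilities to the plugin binary. A hedged sketch — the plugin path below is the usual install location and is an assumption:

```sh
# Let apps.plugin read /proc entries of processes owned by other users
# (the path is an assumption; adjust it to your install).
sudo setcap 'cap_dac_read_search,cap_sys_ptrace+ep' \
  /usr/libexec/netdata/plugins.d/apps.plugin

# Verify the capabilities were applied.
getcap /usr/libexec/netdata/plugins.d/apps.plugin
```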
@@ -217,7 +234,7 @@ Examples below for process group `sql`:
- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pipes&dimensions=sql&value_color=green=0%7Cred)
- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.sockets&dimensions=sql&value_color=green%3E=3%7Cred)
-For more information about badges check [Generating Badges](../../web/api/badges)
+For more information about badges, check [Generating Badges](/web/api/badges/README.md)
## Comparison with console tools
@@ -351,9 +368,7 @@ So, the `ssh` session is using 95% CPU time.
Why `ssh`?
-`apps.plugin` groups all processes based on its configuration file
-[`/etc/netdata/apps_groups.conf`](apps_groups.conf)
-(to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+`apps.plugin` groups all processes based on its configuration file.
The default configuration has nothing for `bash`, but it does have an entry for `sshd`, so Netdata accumulates
all ssh sessions into a dimension on the charts, called `ssh`. This includes all the processes in
the process tree of `sshd`, **including the exited children**.
@@ -368,10 +383,9 @@ the process tree of `sshd`, **including the exited children**.
Netdata reads `/proc/<pid>/stat` for all processes, once per second, and extracts `utime` and
`stime` (user and system CPU utilization), much like all the console tools do.
-But it [also extracts `cutime` and `cstime`](https://github.com/netdata/netdata/blob/62596cc6b906b1564657510ca9135c08f6d4cdda/src/apps_plugin.c#L636-L642)
-that account the user and system time of the exit children of each process. By keeping a map in
-memory of the whole process tree, it is capable of assigning the right time to every process,
-taking into account all its exited children.
+But it also extracts `cutime` and `cstime`, which account for the user and system time of the exited children of each process.
+By keeping a map in memory of the whole process tree, it is capable of assigning the right time to every process, taking
+into account all its exited children.
It is tricky, since a process may run for 1 hour and, once it exits, its parent should not
receive the whole 1 hour of CPU time in just 1 second - you have to subtract the CPU time that has
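For orientation, a minimal shell sketch of where these counters live — fields 14-17 of `/proc/<pid>/stat`, in clock ticks. This is illustrative only; `apps.plugin` itself parses the file in C:

```sh
# Print utime, stime, cutime and cstime for a PID (defaults to this shell).
# Strip everything up to the last ") " first, because the comm field may
# itself contain spaces and parentheses.
pid="${1:-$$}"
stat="$(</proc/${pid}/stat)"
rest="${stat##*) }"
read -r _state _ppid _pgrp _sess _tty _tpgid _flags \
  _minflt _cminflt _majflt _cmajflt \
  utime stime cutime cstime _rest <<< "${rest}"
echo "utime=${utime} stime=${stime} cutime=${cutime} cstime=${cstime}"
```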
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
index 9d6341004..9bf928712 100644
--- a/collectors/apps.plugin/apps_groups.conf
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -86,19 +86,23 @@ fping: fping
ioping: ioping
go.d.plugin: *go.d.plugin*
slabinfo.plugin: slabinfo.plugin
+ebpf.plugin: *ebpf.plugin*
+
+# agent-service-discovery
+agent_sd: agent_sd
# -----------------------------------------------------------------------------
# authentication/authorization related servers
-auth: radius* openldap* ldap* slapd
+auth: radius* openldap* ldap* slapd authelia
fail2ban: fail2ban*
# -----------------------------------------------------------------------------
# web/ftp servers
-httpd: apache* httpd nginx* lighttpd
+httpd: apache* httpd nginx* lighttpd hiawatha
proxy: squid* c-icap squidGuard varnish*
-php: php*
+php: php* lsphp*
ftpd: proftpd in.tftpd vsftpd
uwsgi: uwsgi
unicorn: *unicorn*
@@ -107,14 +111,15 @@ puma: *puma*
# -----------------------------------------------------------------------------
# database servers
-sql: mysqld* mariad* postgres* postmaster* oracle_* ora_*
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
nosql: mongod redis* memcached *couchdb*
-timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain*
+timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
+columndb: clickhouse-server*
# -----------------------------------------------------------------------------
# email servers
-email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam unbound tlsmgr postfwd2 postscreen postfix smtp* lmtp* sendmail
+email: dovecot imapd pop3d amavis* master zmstat* zmmailboxdmgr qmgr oqmgr saslauthd opendkim clamd freshclam tlsmgr postfwd2 postscreen postfix smtp* lmtp* sendmail
# -----------------------------------------------------------------------------
# network, routing, VPN
@@ -155,8 +160,8 @@ azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms*
# -----------------------------------------------------------------------------
# storage, file systems and file servers
-ceph: ceph-mds ceph-mgr ceph-mon ceph-osd radosgw* rbd-*
-samba: smbd nmbd winbindd
+ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* osdmaptool crushtool
+samba: smbd nmbd winbindd ctdbd ctdb-* ctdb_*
nfs: rpcbind rpc.* nfs*
zfs: spl_* z_* txg_* zil_* arc_* l2arc*
btrfs: btrfs*
@@ -200,7 +205,7 @@ dhcp: *dhcp*
# -----------------------------------------------------------------------------
# name servers and clients
-named: named rncd dig
+dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq systemd-resolve* pihole*
dnsdist: dnsdist
# -----------------------------------------------------------------------------
@@ -212,7 +217,7 @@ build: git gdb valgrind*
# -----------------------------------------------------------------------------
# antivirus
-antivirus: clam* *clam
+antivirus: clam* *clam imunify360*
# -----------------------------------------------------------------------------
# torrent clients
@@ -222,7 +227,7 @@ torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
# -----------------------------------------------------------------------------
# backup servers and clients
-backup: rsync bacula*
+backup: rsync lsyncd bacula* borg rclone
# -----------------------------------------------------------------------------
# cron
@@ -238,7 +243,7 @@ ups: upsmon upsd */nut/*
# media players, servers, clients
media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
-media: mpd minidlnad mt-daapd avahi* Plex*
+media: mpd minidlnad mt-daapd avahi* Plex* jellyfin squeeze* jackett Ombi
# -----------------------------------------------------------------------------
# java applications
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index 93159406e..0cfeeacd4 100644
--- a/collectors/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -508,6 +508,7 @@ struct file_descriptor {
static int
all_files_len = 0,
all_files_size = 0;
+ long double currentmaxfds = 0;
// ----------------------------------------------------------------------------
// read users and groups from files
@@ -535,7 +536,7 @@ enum user_or_group_id_type {
struct user_or_group_ids{
enum user_or_group_id_type type;
- avl_tree index;
+ avl_tree_type index;
struct user_or_group_id *root;
char filename[FILENAME_MAX + 1];
@@ -1691,7 +1692,7 @@ int file_descriptor_compare(void* a, void* b) {
// int file_descriptor_iterator(avl *a) { if(a) {}; return 0; }
-avl_tree all_files_index = {
+avl_tree_type all_files_index = {
NULL,
file_descriptor_compare
};
@@ -2451,7 +2452,8 @@ static inline void link_all_processes_to_their_parents(void) {
p->parent = NULL;
if(unlikely(!p->ppid)) {
- p->parent = NULL;
+ //unnecessary code from apps_plugin.c
+ //p->parent = NULL;
continue;
}
@@ -2997,6 +2999,7 @@ static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) {
reallocate_target_fds(u);
reallocate_target_fds(g);
+ long double currentfds = 0;
size_t c, size = p->fds_size;
struct pid_fd *fds = p->fds;
for(c = 0; c < size ;c++) {
@@ -3005,10 +3008,15 @@ static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) {
if(likely(fd <= 0 || fd >= all_files_size))
continue;
+ currentfds++;
+
aggregate_fd_on_target(fd, w);
aggregate_fd_on_target(fd, u);
aggregate_fd_on_target(fd, g);
}
+
+ if (currentfds >= currentmaxfds)
+ currentmaxfds = currentfds;
}
static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) {
@@ -3606,6 +3614,10 @@ static void send_collected_data_to_netdata(struct target *root, const char *type
if (unlikely(w->exposed && w->processes))
send_SET(w->name, w->openfiles);
}
+ if (!strcmp("apps", type)){
+ kernel_uint_t usedfdpercentage = (kernel_uint_t) ((currentmaxfds * 100) / sysconf(_SC_OPEN_MAX));
+ fprintf(stdout, "VARIABLE fdperc = " KERNEL_UINT_FORMAT "\n", usedfdpercentage);
+ }
send_END();
send_BEGIN(type, "sockets", dt);
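The added block publishes the file-descriptor usage as a custom variable over the same plugins.d text protocol the plugin already uses for its charts. A hypothetical excerpt of the stdout stream (chart, dimension, and values are illustrative):

```
BEGIN apps.files 1000000
SET ssh = 12
VARIABLE fdperc = 4
END
```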
@@ -3655,13 +3667,13 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
debug_log_int("%s just added - regenerating charts.", w->name);
}
}
-
+
// nothing more to show
if(!newly_added && show_guest_time == show_guest_time_old) return;
// we have something new to show
// update the charts
- fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (100%% = 1 core)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, time_factor * RATES_DETAIL / 100, w->hidden ? "hidden" : "");
@@ -3717,20 +3729,20 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type
}
#endif
- fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (100%% = 1 core)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
}
- fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (100%% = 1 core)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, type, update_every);
for (w = root; w ; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
}
if(show_guest_time) {
- fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? "s" : "", type, update_every);
+ fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (100%% = 1 core)' 'percentage' cpu %s.cpu_system stacked 20022 %d\n", type, title, type, update_every);
for (w = root; w; w = w->next) {
if(unlikely(w->exposed))
fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU);
@@ -4110,8 +4122,6 @@ int main(int argc, char **argv) {
procfile_adaptive_initial_allocation = 1;
- time_t started_t = now_monotonic_sec();
-
get_system_HZ();
#ifdef __FreeBSD__
time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
@@ -4173,12 +4183,19 @@ int main(int argc, char **argv) {
usec_t dt = heartbeat_next(&hb, step);
#endif
+ struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR };
+ if (unlikely(poll(&pollfd, 1, 0) < 0))
+ fatal("Cannot check if a pipe is available");
+ if (unlikely(pollfd.revents & POLLERR))
+ fatal("Cannot write to a pipe");
+
if(!collect_data_for_all_processes()) {
error("Cannot collect /proc data for running processes. Disabling apps.plugin...");
printf("DISABLE\n");
exit(1);
}
+ currentmaxfds = 0;
calculate_netdata_statistics();
normalize_utilization(apps_groups_root_target);
@@ -4206,8 +4223,5 @@ int main(int argc, char **argv) {
show_guest_time_old = show_guest_time;
debug_log("done Loop No %zu", global_iterations_counter);
-
- // restart check (14400 seconds)
- if(now_monotonic_sec() - started_t > 14400) exit(0);
}
}
diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in
deleted file mode 100644
index 42fa2f189..000000000
--- a/collectors/cgroups.plugin/Makefile.in
+++ /dev/null
@@ -1,618 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/cgroups.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_noinst_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- cgroup-name.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- cgroup-name.sh.in \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsSCRIPTS install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_pluginsSCRIPTS
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
index f34055b39..21dbcae83 100644
--- a/collectors/cgroups.plugin/README.md
+++ b/collectors/cgroups.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "cgroups.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/README.md
+-->
+
# cgroups.plugin
You can monitor containers and virtual machines using **cgroups**.
@@ -98,7 +103,15 @@ For this mapping Netdata provides 2 configuration options:
The whole point of the additional pattern list is to limit the number of times the script is called. Without it, the script might be called thousands of times, depending on the number of cgroups available on the system.
-The above pattern list is matched against the path of the cgroup. For matched cgroups, Netdata calls the script [cgroup-name.sh](cgroup-name.sh.in) to get its name. This script queries `docker`, or applies heuristics to find give a name for the cgroup.
+The above pattern list is matched against the path of the cgroup. For matched cgroups, Netdata calls the script [cgroup-name.sh](https://raw.githubusercontent.com/netdata/netdata/master/collectors/cgroups.plugin/cgroup-name.sh.in) to get its name. This script queries `docker`, `kubectl`, or `podman`, or applies heuristics to give a name to the cgroup.
+
+#### Note on Podman container names
+
+Podman's security model is much more restrictive than Docker's, so Netdata will not be able to detect container names out of the box unless the containers were started by the same user as Netdata itself.
+
+If Podman is used in "rootful" mode, it's also possible to use `podman system service` to grant Netdata access to container names. To do this, ensure `podman system service` is running and Netdata has access to `/run/podman/podman.sock` (the default permissions as specified by upstream are `0600`, with owner `root`, so you will have to adjust the configuration).
+
+[docker-socket-proxy](https://github.com/Tecnativa/docker-socket-proxy) can also be used to give Netdata restricted access to the socket. Note that `PODMAN_HOST` in Netdata's environment should be set to the proxy's URL in this case.
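A hedged sketch of the rootful route — the socket unit name and path are the common upstream defaults and may differ per distribution:

```sh
# Rootful Podman: expose the API socket via the systemd socket unit.
sudo systemctl enable --now podman.socket

# Upstream ships the socket as 0600 root:root; Netdata needs access to it.
ls -l /run/podman/podman.sock

# If proxying the socket instead, point Netdata at the proxy, e.g.
# (hypothetical proxy address):
#   PODMAN_HOST=tcp://127.0.0.1:2375
```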
### charts with zero metrics
@@ -132,7 +145,7 @@ Support per distribution:
|Fedora 25|YES|[here](http://pastebin.com/ax0373wF)||
|Debian 8|NO||can be enabled, see below|
|AMI|NO|[here](http://pastebin.com/FrxmptjL)|not a systemd system|
-|Centos 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below|
+|CentOS 7.3.1611|NO|[here](http://pastebin.com/SpzgezAg)|can be enabled, see below|
### how to enable cgroup accounting on systemd systems where it is disabled by default
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
deleted file mode 100644
index 2f66317d8..000000000
--- a/collectors/cgroups.plugin/cgroup-name.sh
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env bash
-#shellcheck disable=SC2001
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Script to find a better name for cgroups
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-function docker_get_name_classic() {
- local id="${1}"
- info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
- NAME="$(docker ps --filter=id="${id}" --format="{{.Names}}")"
- return 0
-}
-
-function docker_get_name_api() {
- local path="/containers/${1}/json"
- if [ -z "${DOCKER_HOST}" ]; then
- warning "No DOCKER_HOST is set"
- return 1
- fi
- if ! command -v jq >/dev/null 2>&1; then
- warning "Can't find jq command line tool. jq is required for netdata to retrieve docker container name using ${DOCKER_HOST} API, falling back to docker ps"
- return 1
- fi
- if [ -S "${DOCKER_HOST}" ]; then
- info "Running API command: curl --unix-socket ${DOCKER_HOST} http://localhost${path}"
- JSON=$(curl -sS --unix-socket "${DOCKER_HOST}" "http://localhost${path}")
- elif [ "${DOCKER_HOST}" == "/var/run/docker.sock" ]; then
- warning "Docker socket was not found at ${DOCKER_HOST}"
- return 1
- else
- info "Running API command: curl ${DOCKER_HOST}${path}"
- JSON=$(curl -sS "${DOCKER_HOST}${path}")
- fi
- NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
- return 0
-}
-
-function k8s_get_name() {
- # Take the last part of the delimited path identifier (expecting either _ or / as a delimiter).
- local id="${1##*_}"
- if [ "${id}" == "${1}" ]; then
- id="${1##*/}"
- fi
- KUBE_TOKEN="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
- if command -v jq >/dev/null 2>&1; then
- NAME="$(
- curl -sSk -H "Authorization: Bearer $KUBE_TOKEN" "https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT/api/v1/pods" |
- jq -r '.items[] | "k8s_\(.metadata.namespace)_\(.metadata.name)_\(.metadata.uid)_" + (.status.containerStatuses[]? | "\(.name) \(.containerID)")' |
- grep "$id" |
- cut -d' ' -f1
- )"
- else
- warning "jq command not available, k8s_get_name() cannot execute. Please install jq should you wish for k8s to be fully functional"
- fi
-
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of k8s pod with containerID '${id}'. Setting name to ${id} and disabling it"
- NAME="${id}"
- NAME_NOT_FOUND=3
- else
- info "k8s containerID '${id}' has chart name (namespace_podname_poduid_containername) '${NAME}'"
- fi
-}
-
-function docker_get_name() {
- local id="${1}"
- if hash docker 2>/dev/null; then
- docker_get_name_classic "${id}"
- else
- docker_get_name_api "${id}" || docker_get_name_classic "${id}"
- fi
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of docker container '${id}'"
- NAME_NOT_FOUND=2
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-function docker_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
- docker_get_name "${id}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
-}
-
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
-
-DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
-CGROUP="${1}"
-NAME_NOT_FOUND=0
-NAME=
-
-# -----------------------------------------------------------------------------
-
-if [ -z "${CGROUP}" ]; then
- fatal "called without a cgroup name. Nothing to do."
-fi
-
-for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do
- if [ -f "${CONFIG}" ]; then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]; then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
- else
- break
- fi
- #else
- # info "configuration file '${CONFIG}' is not available."
- fi
-done
-
-if [ -z "${NAME}" ] && [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ] && [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
- k8s_get_name "${CGROUP}"
-fi
-
-if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # ECS
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
-
- elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
- # systemd-nspawn
- NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
- # libvirtd / qemu virtual machines
- # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
- NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
-
- elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]; then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
- NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
-fi
-
-info "cgroup '${CGROUP}' is called '${NAME}'"
-echo "${NAME}"
-
-exit ${NAME_NOT_FOUND}
-
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
index f7b765bb6..19fbf3989 100755
--- a/collectors/cgroups.plugin/cgroup-name.sh.in
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -17,117 +17,360 @@ export LC_ALL=C
PROGRAM_NAME="$(basename "${0}")"
logdate() {
- date "+%Y-%m-%d %H:%M:%S"
+ date "+%Y-%m-%d %H:%M:%S"
}
log() {
- local status="${1}"
- shift
+ local status="${1}"
+ shift
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+ echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
}
warning() {
- log WARNING "${@}"
+ log WARNING "${@}"
}
error() {
- log ERROR "${@}"
+ log ERROR "${@}"
}
info() {
- log INFO "${@}"
+ log INFO "${@}"
}
fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-function docker_get_name_classic() {
- local id="${1}"
- info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
- NAME="$(docker ps --filter=id="${id}" --format="{{.Names}}")"
- return 0
-}
-
-function docker_get_name_api() {
- local path="/containers/${1}/json"
- if [ -z "${DOCKER_HOST}" ]; then
- warning "No DOCKER_HOST is set"
- return 1
- fi
- if ! command -v jq >/dev/null 2>&1; then
- warning "Can't find jq command line tool. jq is required for netdata to retrieve docker container name using ${DOCKER_HOST} API, falling back to docker ps"
- return 1
- fi
- if [ -S "${DOCKER_HOST}" ]; then
- info "Running API command: curl --unix-socket ${DOCKER_HOST} http://localhost${path}"
- JSON=$(curl -sS --unix-socket "${DOCKER_HOST}" "http://localhost${path}")
- elif [ "${DOCKER_HOST}" == "/var/run/docker.sock" ]; then
- warning "Docker socket was not found at ${DOCKER_HOST}"
- return 1
- else
- info "Running API command: curl ${DOCKER_HOST}${path}"
- JSON=$(curl -sS "${DOCKER_HOST}${path}")
- fi
- NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
- return 0
+ log FATAL "${@}"
+ exit 1
+}
+
+function docker_like_get_name_command() {
+ local command="${1}"
+ local id="${2}"
+ info "Running command: ${command} ps --filter=id=\"${id}\" --format=\"{{.Names}}\""
+ NAME="$(${command} ps --filter=id="${id}" --format="{{.Names}}")"
+ return 0
+}
+
+function docker_like_get_name_api() {
+ local host_var="${1}"
+ local host="${!host_var}"
+ local path="/containers/${2}/json"
+ if [ -z "${host}" ]; then
+ warning "No ${host_var} is set"
+ return 1
+ fi
+ if ! command -v jq > /dev/null 2>&1; then
+ warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
+ return 1
+ fi
+ if [ -S "${host}" ]; then
+ info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
+ JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
+ else
+ info "Running API command: curl \"${host}${path}\""
+ JSON=$(curl -sS "${host}${path}")
+ fi
+ NAME=$(echo "${JSON}" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||')
+ return 0
+}
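+
+# Illustrative calls for the two helpers above (the id is a hypothetical
+# 64-hex container id):
+#   docker_like_get_name_command docker "4dd5e9b05225..."   # shells out to 'docker ps'
+#   docker_like_get_name_api DOCKER_HOST "4dd5e9b05225..."  # queries the socket/URL in $DOCKER_HOST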
+
+# get_lbl_val returns the value for the label with the given name.
+# Returns "null" string if the label doesn't exist.
+# Expected labels format: 'name="value",...'.
+function get_lbl_val() {
+ local labels want_name
+ labels="${1}"
+ want_name="${2}"
+
+ IFS=, read -ra labels <<< "$labels"
+
+ local lname lval
+ for l in "${labels[@]}"; do
+ IFS="=" read -r lname lval <<< "$l"
+ if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
+ echo "${lval:1:-1}" # trim "
+ return 0
+ fi
+ done
+
+ echo "null"
+ return 1
+}
+
+function add_lbl_prefix() {
+ local orig_labels prefix
+ orig_labels="${1}"
+ prefix="${2}"
+
+ IFS=, read -ra labels <<< "$orig_labels"
+
+ local new_labels
+ for l in "${labels[@]}"; do
+ new_labels+="${prefix}${l},"
+ done
+
+ echo "${new_labels:0:-1}" # trim last ','
+}
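+
+# Illustrative usage of the two label helpers above (the label string is a
+# hypothetical example):
+#   labels='namespace="default",pod_name="web-1",container_name="nginx"'
+#   get_lbl_val "$labels" pod_name   # prints: web-1
+#   add_lbl_prefix "$labels" "k8s_"  # prints: k8s_namespace="default",k8s_pod_name="web-1",k8s_container_name="nginx"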
+
+# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
+# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
+# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
+function k8s_get_kubepod_name() {
+ # GKE /sys/fs/cgroup/*/ tree:
+ # |-- kubepods
+ # | |-- burstable
+ # | | |-- pod98cee708-023b-11eb-933d-42010a800193
+ # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
+ # | | `-- a5d223eec35e00f5a1c6fa3e3a5faac6148cdc1f03a2e762e873b7efede012d7
+ # | `-- pode314bbac-d577-11ea-a171-42010a80013b
+ # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
+ # | `-- 88ab4683b99cfa7cc8c5f503adf7987dd93a3faa7c4ce0d17d419962b3220d50
+ #
+ # Minikube (v1.8.2) /sys/fs/cgroup/*/ tree:
+ # |-- kubepods.slice
+ # | |-- kubepods-besteffort.slice
+ # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
+ # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
+ # | | | `-- docker-87e18c2323621cf0f635c53c798b926e33e9665c348c60d489eef31ee1bd38d7.scope
+ #
+ # NOTE: cgroups plugin uses '_' to join dir names, so it is <parent>_<child>_<child>_...
+
+ local fn="${FUNCNAME[0]}"
+ local id="${1}"
+
+ if [[ ! $id =~ ^kubepods ]]; then
+ warning "${fn}: '${id}' is not kubepod cgroup."
+ return 1
+ fi
+
+ local clean_id="$id"
+ clean_id=${clean_id//.slice/}
+ clean_id=${clean_id//.scope/}
+
+ local name pod_uid cntr_id
+ if [[ $clean_id == "kubepods" ]]; then
+ name="$clean_id"
+ elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
+ # kubepods_<QOS_CLASS>
+ # kubepods_kubepods-<QOS_CLASS>
+ name=${clean_id//-/_}
+ name=${name/#kubepods_kubepods/kubepods}
+ elif [[ $clean_id =~ .+pod[a-f0-9_-]+_docker-([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_docker-<CONTAINER_ID> (POD_UID w/ "_")
+ cntr_id=${BASH_REMATCH[1]}
+ elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_<CONTAINER_ID>
+ cntr_id=${BASH_REMATCH[1]}
+ elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
+ # ...pod<POD_UID> (POD_UID w/ and w/o "_")
+ pod_uid=${BASH_REMATCH[1]}
+ pod_uid=${pod_uid//_/-}
+ fi
+
+ if [ -n "$name" ]; then
+ echo "$name"
+ return 0
+ fi
+
+ if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
+ warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
+ return 1
+ fi
+
+ [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
+ [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
+
+ if ! command -v jq > /dev/null 2>&1; then
+ warning "${fn}: 'jq' command not available."
+ return 1
+ fi
+
+ local kube_system_ns
+ local tmp_kube_system_ns_file="${TMPDIR:-"/tmp/"}netdata-cgroups-kube-system-ns"
+ [ -f "$tmp_kube_system_ns_file" ] && kube_system_ns=$(cat "$tmp_kube_system_ns_file" 2> /dev/null)
+
+ local pods
+ if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
+ local token header host url
+ token="$(< /var/run/secrets/kubernetes.io/serviceaccount/token)"
+ header="Authorization: Bearer $token"
+ host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
+
+ if [ -z "$kube_system_ns" ]; then
+ url="https://$host/api/v1/namespaces/kube-system"
+ # FIX: check HTTP response code
+ if ! kube_system_ns=$(curl -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${kube_system_ns}."
+ else
+ echo "$kube_system_ns" > "$tmp_kube_system_ns_file" 2> /dev/null
+ fi
+ fi
+
+ url="https://$host/api/v1/pods"
+ [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
+ # FIX: check HTTP response code
+ if ! pods=$(curl -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${pods}."
+ return 1
+ fi
+ elif ps -C kubelet > /dev/null 2>&1 && command -v kubectl > /dev/null 2>&1; then
+ if [ -z "$kube_system_ns" ]; then
+ if ! kube_system_ns=$(kubectl get namespaces kube-system -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${kube_system_ns}."
+ else
+ echo "$kube_system_ns" > "$tmp_kube_system_ns_file" 2> /dev/null
+ fi
+ fi
+
+ [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
+ if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${pods}."
+ return 1
+ fi
+ else
+ warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
+ return 1
+ fi
+
+ local kube_system_uid
+ if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<< "$kube_system_ns" 2>&1); then
+ warning "${fn}: error on 'jq' parse kube_system_ns: ${kube_system_uid}."
+ fi
+
+ local jq_filter
+ jq_filter+='.items[] | "'
+ jq_filter+='namespace=\"\(.metadata.namespace)\",'
+ jq_filter+='pod_name=\"\(.metadata.name)\",'
+ jq_filter+='pod_uid=\"\(.metadata.uid)\",'
+ #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
+ jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
+ jq_filter+='node_name=\"\(.spec.nodeName)\",'
+ jq_filter+='" + '
+ jq_filter+='(.status.containerStatuses[]? | "'
+ jq_filter+='container_name=\"\(.name)\",'
+ jq_filter+='container_id=\"\(.containerID)\"'
+ jq_filter+='") | '
+ jq_filter+='sub("docker://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
+
+ local containers
+ if ! containers=$(jq -r "${jq_filter}" <<< "$pods" 2>&1); then
+ warning "${fn}: error on 'jq' parse pods: ${containers}."
+ return 1
+ fi
+
+ # available labels:
+ # namespace, pod_name, pod_uid, container_name, container_id, node_name
+ local labels
+ if [ -n "$cntr_id" ]; then
+ if labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
+ labels+=',kind="container"'
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ name="cntr"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ name+="_$(get_lbl_val "$labels" container_name)"
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ fi
+ elif [ -n "$pod_uid" ]; then
+ if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
+ labels="${labels%%,container_*}"
+ labels+=',kind="pod"'
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ name="pod"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ fi
+ fi
+
+ # the jq filter prints 'null' for nonexistent fields and label values
+ if [[ $name =~ _null(_|$) ]]; then
+ warning "${fn}: invalid name: $name (cgroup '$id')"
+ name=""
+ fi
+
+ echo "$name"
+ [ -n "$name" ]
+ return
}
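+
+# Resolution examples for k8s_get_kubepod_name above (hypothetical cgroup ids,
+# assuming the pod/container is present in the API response):
+#   kubepods_kubepods-burstable            -> kubepods_burstable
+#   ...pod<POD_UID>                        -> pod_<namespace>_<pod_name> <labels>
+#   ...pod<POD_UID>_docker-<CONTAINER_ID>  -> cntr_<namespace>_<pod_name>_<container_name> <labels>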
function k8s_get_name() {
- # Take the last part of the delimited path identifier (expecting either _ or / as a delimiter).
- local id="${1##*_}"
- if [ "${id}" == "${1}" ]; then
- id="${1##*/}"
- fi
- KUBE_TOKEN="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
- if command -v jq >/dev/null 2>&1; then
- NAME="$(
- curl -sSk -H "Authorization: Bearer $KUBE_TOKEN" "https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT/api/v1/pods" |
- jq -r '.items[] | "k8s_\(.metadata.namespace)_\(.metadata.name)_\(.metadata.uid)_" + (.status.containerStatuses[]? | "\(.name) \(.containerID)")' |
- grep "$id" |
- cut -d' ' -f1
- )"
- else
- warning "jq command not available, k8s_get_name() cannot execute. Please install jq should you wish for k8s to be fully functional"
- fi
-
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of k8s pod with containerID '${id}'. Setting name to ${id} and disabling it"
- NAME="${id}"
- NAME_NOT_FOUND=3
- else
- info "k8s containerID '${id}' has chart name (namespace_podname_poduid_containername) '${NAME}'"
- fi
+ local fn="${FUNCNAME[0]}"
+ local id="${1}"
+
+ NAME=$(k8s_get_kubepod_name "$id")
+
+ if [ -z "${NAME}" ]; then
+ warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${id} and disabling it."
+ NAME="${id}"
+ NAME_NOT_FOUND=3
+ else
+ NAME="k8s_${NAME}"
+
+ local name labels
+ name=${NAME%% *}
+ labels=${NAME#* }
+ if [ "$name" != "$labels" ]; then
+ info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}"
+ else
+ info "${fn}: cgroup '${id}' has chart name '${NAME}'"
+ fi
+ fi
}
function docker_get_name() {
- local id="${1}"
- if hash docker 2>/dev/null; then
- docker_get_name_classic "${id}"
- else
- docker_get_name_api "${id}" || docker_get_name_classic "${id}"
- fi
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of docker container '${id}'"
- NAME_NOT_FOUND=2
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
+ local id="${1}"
+ if hash docker 2> /dev/null; then
+ docker_like_get_name_command docker "${id}"
+ else
+ docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
+ fi
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of docker container '${id}'"
+ NAME_NOT_FOUND=2
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
}
function docker_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
- docker_get_name "${id}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
+ local id="${1}"
+ if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
+ docker_get_name "${id}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+function podman_get_name() {
+ local id="${1}"
+
+ # for Podman, prefer using the API if we can, as netdata will not normally have access
+ # to other users' containers, so they will not be visible when running `podman ps`
+ docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
+
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of podman container '${id}'"
+ NAME_NOT_FOUND=2
+ NAME="${id:0:12}"
+ else
+ info "podman container '${id}' is named '${NAME}'"
+ fi
}
+function podman_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
+ podman_get_name "${id}"
+ else
+ error "a podman id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
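+
+# Illustrative call (hypothetical 64-hex id, as extracted from a
+# 'libpod-<id>' cgroup further below):
+#   podman_validate_id "9a1f3e0c...64 hex chars..."   # sets NAME, or NAME_NOT_FOUND=2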
# -----------------------------------------------------------------------------
@@ -135,6 +378,7 @@ function docker_validate_id() {
[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
CGROUP="${1}"
NAME_NOT_FOUND=0
NAME=
@@ -142,77 +386,85 @@ NAME=
# -----------------------------------------------------------------------------
if [ -z "${CGROUP}" ]; then
- fatal "called without a cgroup name. Nothing to do."
+ fatal "called without a cgroup name. Nothing to do."
fi
for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do
- if [ -f "${CONFIG}" ]; then
- NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
- if [ -z "${NAME}" ]; then
- info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
- else
- break
- fi
- #else
- # info "configuration file '${CONFIG}' is not available."
- fi
+ if [ -f "${CONFIG}" ]; then
+ NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
+ if [ -z "${NAME}" ]; then
+ info "cannot find cgroup '${CGROUP}' in '${CONFIG}'."
+ else
+ break
+ fi
+ #else
+ # info "configuration file '${CONFIG}' is not available."
+ fi
done
-if [ -z "${NAME}" ] && [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ] && [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
- k8s_get_name "${CGROUP}"
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
+ k8s_get_name "${CGROUP}"
+ fi
fi
if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # ECS
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
-
- elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
- # systemd-nspawn
- NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
- # libvirtd / qemu virtual machines
- # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
- NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
-
- elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]; then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
- NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+ if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # ECS
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
+ # Podman
+ PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
+ podman_validate_id "${PODMANID}"
+
+ elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
+ # systemd-nspawn
+ NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')"
+
+ elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
+ # Proxmox VMs
+
+ FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]; then
+ NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
+ # Proxmox Containers (LXC)
+
+ FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
+ NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
+ # LXC 4.0
+ NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
fi
info "cgroup '${CGROUP}' is called '${NAME}'"
echo "${NAME}"
exit ${NAME_NOT_FOUND}
-
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
index d01065422..921b14dfb 100644
--- a/collectors/cgroups.plugin/cgroup-network.c
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -689,8 +689,10 @@ int main(int argc, char **argv) {
}
else if(!strcmp(argv[1], "--cgroup")) {
char *cgroup = argv[2];
- if(verify_path(cgroup) == -1)
- fatal("cgroup '%s' does not exist or is not valid.", cgroup);
+ if(verify_path(cgroup) == -1) {
+ error("cgroup '%s' does not exist or is not valid.", cgroup);
+ return 1;
+ }
pid = read_pid_from_cgroup(cgroup);
call_the_helper(pid, cgroup);
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index d9d130f7e..705c51735 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -23,6 +23,11 @@ static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
@@ -68,6 +73,130 @@ static uint32_t Write_hash = 0;
static uint32_t user_hash = 0;
static uint32_t system_hash = 0;
+enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 };
+
+enum cgroups_systemd_setting {
+ SYSTEMD_CGROUP_ERR,
+ SYSTEMD_CGROUP_LEGACY,
+ SYSTEMD_CGROUP_HYBRID,
+ SYSTEMD_CGROUP_UNIFIED
+};
+
+struct cgroups_systemd_config_setting {
+ char *name;
+ enum cgroups_systemd_setting setting;
+};
+
+static struct cgroups_systemd_config_setting cgroups_systemd_options[] = {
+ { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY },
+ { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID },
+ { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED },
+ { .name = NULL, .setting = SYSTEMD_CGROUP_ERR },
+};
+
+/* on Fedora systemd is not in PATH for some reason */
+#define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version"
+#define SYSTEMD_HIERARCHY_STRING "default-hierarchy="
+
+#define MAXSIZE_PROC_CMDLINE 4096
+static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec)
+{
+ pid_t command_pid;
+ enum cgroups_systemd_setting retval = SYSTEMD_CGROUP_ERR;
+ char buf[MAXSIZE_PROC_CMDLINE];
+ char *begin, *end;
+
+ FILE *f = mypopen(exec, &command_pid);
+
+ if (!f)
+ return retval;
+
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, f) != NULL) {
+ if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) {
+ end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING);
+ if (!*begin)
+ break;
+ while (isalpha(*end))
+ end++;
+ *end = 0;
+ for (int i = 0; cgroups_systemd_options[i].name; i++) {
+ if (!strcmp(begin, cgroups_systemd_options[i].name)) {
+ retval = cgroups_systemd_options[i].setting;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ if (mypclose(f, command_pid))
+ return SYSTEMD_CGROUP_ERR;
+
+ return retval;
+}
+
+static enum cgroups_type cgroups_try_detect_version()
+{
+ pid_t command_pid;
+ char buf[MAXSIZE_PROC_CMDLINE];
+ enum cgroups_systemd_setting systemd_setting;
+ int cgroups2_available = 0;
+
+ // 1. check if cgroups2 is available on the system at all
+ FILE *f = mypopen("grep cgroup /proc/filesystems", &command_pid);
+ if (!f) {
+ error("popen failed");
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, f) != NULL) {
+ if (strstr(buf, "cgroup2")) {
+ cgroups2_available = 1;
+ break;
+ }
+ }
+ if(mypclose(f, command_pid))
+ return CGROUPS_AUTODETECT_FAIL;
+
+ if(!cgroups2_available)
+ return CGROUPS_V1;
+
+ // 2. check the systemd compile-time setting
+ if ((systemd_setting = cgroups_detect_systemd("systemd --version")) == SYSTEMD_CGROUP_ERR)
+ systemd_setting = cgroups_detect_systemd(SYSTEMD_CMD_RHEL);
+
+ if(systemd_setting == SYSTEMD_CGROUP_ERR)
+ return CGROUPS_AUTODETECT_FAIL;
+
+ if(systemd_setting == SYSTEMD_CGROUP_LEGACY || systemd_setting == SYSTEMD_CGROUP_HYBRID) {
+ // currently we prefer V1 if HYBRID is set, as it seems to be more feature-complete
+ // in the future we might want to continue here if SYSTEMD_CGROUP_HYBRID is set
+ // and go ahead with V2
+ return CGROUPS_V1;
+ }
+
+ // 3. if we are unified as on Fedora (default cgroups2-only mode),
+ // check the kernel command line flag that can override that setting
+ f = fopen("/proc/cmdline", "r");
+ if (!f) {
+ error("Error reading kernel boot commandline parameters");
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+
+ if (!fgets(buf, MAXSIZE_PROC_CMDLINE, f)) {
+ error("couldn't read all cmdline params into buffer");
+ fclose(f);
+ return CGROUPS_AUTODETECT_FAIL;
+ }
+
+ fclose(f);
+
+ if (strstr(buf, "systemd.unified_cgroup_hierarchy=0")) {
+ info("cgroups v2 (unified cgroups) is available but are disabled on this system.");
+ return CGROUPS_V1;
+ }
+ return CGROUPS_V2;
+}
+
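+// Sketch of how the detection above is consumed: when "use unified cgroups"
+// is left on "auto", read_cgroup_plugin_configuration() below effectively does
+//
+//     if (cgroups_try_detect_version() == CGROUPS_V2)
+//         cgroup_use_unified_cgroups = CONFIG_BOOLEAN_YES;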
void read_cgroup_plugin_configuration() {
system_page_size = sysconf(_SC_PAGESIZE);
@@ -84,7 +213,11 @@ void read_cgroup_plugin_configuration() {
if(cgroup_check_for_new_every < cgroup_update_every)
cgroup_check_for_new_every = cgroup_update_every;
- cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", cgroup_use_unified_cgroups);
+ cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO);
+ if(cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO)
+ cgroup_use_unified_cgroups = (cgroups_try_detect_version() == CGROUPS_V2);
+
+ info("use unified cgroups %s", cgroup_use_unified_cgroups ? "true" : "false");
cgroup_containers_chart_priority = (int)config_get_number("plugin:cgroups", "containers priority", cgroup_containers_chart_priority);
if(cgroup_containers_chart_priority < 1)
@@ -105,6 +238,12 @@ void read_cgroup_plugin_configuration() {
cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops);
cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops);
+ cgroup_enable_pressure_cpu = config_get_boolean_ondemand("plugin:cgroups", "enable cpu pressure", cgroup_enable_pressure_cpu);
+ cgroup_enable_pressure_io_some = config_get_boolean_ondemand("plugin:cgroups", "enable io some pressure", cgroup_enable_pressure_io_some);
+ cgroup_enable_pressure_io_full = config_get_boolean_ondemand("plugin:cgroups", "enable io full pressure", cgroup_enable_pressure_io_full);
+ cgroup_enable_pressure_memory_some = config_get_boolean_ondemand("plugin:cgroups", "enable memory some pressure", cgroup_enable_pressure_memory_some);
+ cgroup_enable_pressure_memory_full = config_get_boolean_ondemand("plugin:cgroups", "enable memory full pressure", cgroup_enable_pressure_memory_full);
+
cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations);
cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations);
cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations);
@@ -116,6 +255,13 @@ void read_cgroup_plugin_configuration() {
char filename[FILENAME_MAX + 1], *s;
struct mountinfo *mi, *root = mountinfo_read(0);
if(!cgroup_use_unified_cgroups) {
+ // cgroup v1 does not have pressure metrics
+ cgroup_enable_pressure_cpu =
+ cgroup_enable_pressure_io_some =
+ cgroup_enable_pressure_io_full =
+ cgroup_enable_pressure_memory_some =
+ cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_NO;
+
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
if(!mi) {
@@ -216,9 +362,12 @@ void read_cgroup_plugin_configuration() {
// ----------------------------------------------------------------
" /machine.slice/*.service " // #3367 systemd-nspawn
+ " /kubepods/pod*/* " // k8s containers
+ " /kubepods/*/pod*/* " // k8s containers
// ----------------------------------------------------------------
+ " !/kubepods* " // all other k8s cgroups
" !*/vcpu* " // libvirtd adds these sub-cgroups
" !*/emulator " // libvirtd adds these sub-cgroups
" !*.mount "
@@ -233,6 +382,9 @@ void read_cgroup_plugin_configuration() {
" !/libvirt "
" !/lxc "
" !/lxc/*/* " // #1397 #2649
+ " !/lxc.monitor* "
+ " !/lxc.pivot "
+ " !/lxc.payload "
" !/machine "
" !/qemu "
" !/system "
@@ -252,6 +404,9 @@ void read_cgroup_plugin_configuration() {
" !/user "
" !/user.slice "
" !/lxc/*/* " // #2161 #2649
+ " !/lxc.monitor "
+ " !/lxc.payload/*/* "
+ " !/lxc.payload.* "
" * "
), NULL, SIMPLE_PATTERN_EXACT);
@@ -429,6 +584,7 @@ struct cgroup_network_interface {
#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004
+// *** WARNING *** These fields are not thread-safe. Take care to access them safely.
struct cgroup {
uint32_t options;
@@ -445,6 +601,8 @@ struct cgroup {
char *chart_title;
+ struct label *chart_labels;
+
struct cpuacct_stat cpuacct_stat;
struct cpuacct_usage cpuacct_usage;
@@ -461,6 +619,10 @@ struct cgroup {
struct cgroup_network_interface *interfaces;
+ struct pressure cpu_pressure;
+ struct pressure io_pressure;
+ struct pressure memory_pressure;
+
// per cgroup charts
RRDSET *st_cpu;
RRDSET *st_cpu_limit;
@@ -530,9 +692,22 @@ struct cgroup {
RRDDIM *rd_io_merged_write;
struct cgroup *next;
+ struct cgroup *discovered_next;
} *cgroup_root = NULL;
+uv_mutex_t cgroup_root_mutex;
+
+struct cgroup *discovered_cgroup_root = NULL;
+
+struct discovery_thread {
+ uv_thread_t thread;
+ uv_mutex_t mutex;
+ uv_cond_t cond_var;
+ int start_discovery;
+ int exited;
+} discovery_thread;
+
// ----------------------------------------------------------------------------
// read values from /sys
@@ -798,6 +973,54 @@ static inline void cgroup2_read_blkio(struct blkio *io, unsigned int word_offset
}
}
+static inline void cgroup2_read_pressure(struct pressure *res) {
+ static procfile *ff = NULL;
+
+ if (likely(res->filename)) {
+ ff = procfile_reopen(ff, res->filename, " =", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) {
+ res->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff)) {
+ res->updated = 0;
+ cgroups_check = 1;
+ return;
+ }
+
+ size_t lines = procfile_lines(ff);
+ if (lines < 1) {
+ error("CGROUP: file '%s' should have 1+ lines.", res->filename);
+ res->updated = 0;
+ return;
+ }
+
+ res->some.value10 = strtod(procfile_lineword(ff, 0, 2), NULL);
+ res->some.value60 = strtod(procfile_lineword(ff, 0, 4), NULL);
+ res->some.value300 = strtod(procfile_lineword(ff, 0, 6), NULL);
+
+ if (lines > 2) {
+ res->full.value10 = strtod(procfile_lineword(ff, 1, 2), NULL);
+ res->full.value60 = strtod(procfile_lineword(ff, 1, 4), NULL);
+ res->full.value300 = strtod(procfile_lineword(ff, 1, 6), NULL);
+ }
+
+ res->updated = 1;
+
+ if (unlikely(res->some.enabled == CONFIG_BOOLEAN_AUTO)) {
+ res->some.enabled = CONFIG_BOOLEAN_YES;
+ if (lines > 2) {
+ res->full.enabled = CONFIG_BOOLEAN_YES;
+ } else {
+ res->full.enabled = CONFIG_BOOLEAN_NO;
+ }
+ }
+ }
+}
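+
+// cgroup v2 pressure files use the PSI layout below (values are examples);
+// cpu.pressure typically exposes only the "some" line, which is why the
+// parser above treats the "full" line as optional:
+//
+//     some avg10=0.22 avg60=0.17 avg300=1.11 total=58761459
+//     full avg10=0.03 avg60=0.01 avg300=0.05 total=37600160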
+
static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unified) {
static procfile *ff = NULL;
@@ -946,6 +1169,9 @@ static inline void cgroup_read(struct cgroup *cg) {
cgroup2_read_blkio(&cg->io_service_bytes, 0);
cgroup2_read_blkio(&cg->io_serviced, 4);
cgroup2_read_cpuacct_stat(&cg->cpuacct_stat);
+ cgroup2_read_pressure(&cg->cpu_pressure);
+ cgroup2_read_pressure(&cg->io_pressure);
+ cgroup2_read_pressure(&cg->memory_pressure);
cgroup_read_memory(&cg->memory, 1);
}
}
@@ -956,7 +1182,7 @@ static inline void read_all_cgroups(struct cgroup *root) {
struct cgroup *cg;
for(cg = root; cg ; cg = cg->next)
- if(cg->enabled && cg->available && !cg->pending_renames)
+ if(cg->enabled && !cg->pending_renames)
cgroup_read(cg);
}
@@ -1016,7 +1242,7 @@ static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
// register a device rename to proc_net_dev.c
- netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id);
+ netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id, cg->chart_labels);
}
}
@@ -1065,6 +1291,35 @@ static inline char *cgroup_chart_id_strdupz(const char *s) {
return r;
}
+char *parse_k8s_data(struct label **labels, char *data)
+{
+ char *name = mystrsep(&data, " ");
+
+ if (!data) {
+ return name;
+ }
+
+ while (data) {
+ char *key = mystrsep(&data, "=");
+
+ char *value;
+ if (data && *data == ',') {
+ value = "";
+ *data++ = '\0';
+ } else {
+ value = mystrsep(&data, ",");
+ }
+ value = strip_double_quotes(value, 1);
+
+ if (!key || *key == '\0' || !value || *value == '\0')
+ continue;
+
+ *labels = add_label_to_list(*labels, key, value, LABEL_SOURCE_KUBERNETES);
+ }
+
+ return name;
+}
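+
+// Illustrative input/output for parse_k8s_data() (hypothetical values):
+//
+//     in:  "k8s_cntr_default_web-1_nginx k8s_namespace=\"default\",k8s_pod_name=\"web-1\""
+//     out: returns "k8s_cntr_default_web-1_nginx"; k8s_namespace=default and
+//          k8s_pod_name=web-1 are appended to *labels with LABEL_SOURCE_KUBERNETES.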
+
static inline void cgroup_get_chart_name(struct cgroup *cg) {
debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
@@ -1095,12 +1350,19 @@ static inline void cgroup_get_chart_name(struct cgroup *cg) {
cg->enabled = 0;
}
- if(likely(cg->pending_renames < 2)) {
+ if (likely(cg->pending_renames < 2)) {
+ char *name = s;
+
+ if (!strncmp(s, "k8s_", 4)) {
+ free_label_list(cg->chart_labels);
+ name = parse_k8s_data(&cg->chart_labels, s);
+ }
+
freez(cg->chart_title);
- cg->chart_title = cgroup_title_strdupz(s);
+ cg->chart_title = cgroup_title_strdupz(name);
freez(cg->chart_id);
- cg->chart_id = cgroup_chart_id_strdupz(s);
+ cg->chart_id = cgroup_chart_id_strdupz(name);
cg->hash_chart = simple_hash(cg->chart_id);
}
}
@@ -1132,13 +1394,13 @@ static inline struct cgroup *cgroup_add(const char *id) {
if(cgroup_use_unified_cgroups) cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
- if(!cgroup_root)
- cgroup_root = cg;
+ if(!discovered_cgroup_root)
+ discovered_cgroup_root = cg;
else {
// append it
struct cgroup *e;
- for(e = cgroup_root; e->next ;e = e->next) ;
- e->next = cg;
+ for(e = discovered_cgroup_root; e->discovered_next ;e = e->discovered_next) ;
+ e->discovered_next = cg;
}
cgroup_root_count++;
@@ -1204,24 +1466,23 @@ static inline struct cgroup *cgroup_add(const char *id) {
// detect duplicate cgroups
if(cg->enabled) {
struct cgroup *t;
- for (t = cgroup_root; t; t = t->next) {
+ for (t = discovered_cgroup_root; t; t = t->discovered_next) {
if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
- if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) {
- error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
- cg->chart_id, t->id, cg->id, t->id);
- debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
- cg->chart_id, t->id, cg->id, t->id);
- t->enabled = 0;
- t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- }
- else {
- error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
- cg->chart_id, t->id, cg->id);
- debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
- cg->chart_id, t->id, cg->id);
- cg->enabled = 0;
- cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- }
+ // TODO: use it after refactoring if system.slice might be scanned before init.scope/system.slice
+ //
+ // if (!strncmp(t->id, "/system.slice/", 14) && !strncmp(cg->id, "/init.scope/system.slice/", 25)) {
+ // error("CGROUP: chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
+ // cg->chart_id, t->id, cg->id, t->id);
+ // t->enabled = 0;
+ // t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ // }
+ // else {}
+ //
+ // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
+ error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id, t->id, cg->id);
+ cg->enabled = 0;
+ cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
break;
}
@@ -1236,6 +1497,12 @@ static inline struct cgroup *cgroup_add(const char *id) {
return cg;
}
+static inline void free_pressure(struct pressure *res) {
+ if (res->some.st) rrdset_is_obsolete(res->some.st);
+ if (res->full.st) rrdset_is_obsolete(res->full.st);
+ freez(res->filename);
+}
+
static inline void cgroup_free(struct cgroup *cg) {
debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
@@ -1284,10 +1551,16 @@ static inline void cgroup_free(struct cgroup *cg) {
freez(cg->io_merged.filename);
freez(cg->io_queued.filename);
+ free_pressure(&cg->cpu_pressure);
+ free_pressure(&cg->io_pressure);
+ free_pressure(&cg->memory_pressure);
+
freez(cg->id);
freez(cg->chart_id);
freez(cg->chart_title);
+ free_label_list(cg->chart_labels);
+
freez(cg);
cgroup_root_count--;
@@ -1300,7 +1573,7 @@ static inline struct cgroup *cgroup_find(const char *id) {
uint32_t hash = simple_hash(id);
struct cgroup *cg;
- for(cg = cgroup_root; cg ; cg = cg->next) {
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
if(hash == cg->hash && strcmp(id, cg->id) == 0)
break;
}
@@ -1426,103 +1699,16 @@ static inline void mark_all_cgroups_as_not_available() {
struct cgroup *cg;
// mark all as not available
- for(cg = cgroup_root; cg ; cg = cg->next) {
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
cg->available = 0;
}
}
-static inline void cleanup_all_cgroups() {
- struct cgroup *cg = cgroup_root, *last = NULL;
-
- for(; cg ;) {
- if(!cg->available) {
- // enable the first duplicate cgroup
- {
- struct cgroup *t;
- for(t = cgroup_root; t ; t = t->next) {
- if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
- debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
- t->enabled = 1;
- t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
- break;
- }
- }
- }
-
- if(!last)
- cgroup_root = cg->next;
- else
- last->next = cg->next;
-
- cgroup_free(cg);
-
- if(!last)
- cg = cgroup_root;
- else
- cg = last->next;
- }
- else {
- last = cg;
- cg = cg->next;
- }
- }
-}
-
-static inline void find_all_cgroups() {
- debug(D_CGROUP, "searching for cgroups");
-
- mark_all_cgroups_as_not_available();
- if(!cgroup_use_unified_cgroups) {
- if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
- if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
- cgroup_enable_cpuacct_stat =
- cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
- error("CGROUP: disabled cpu statistics.");
- }
- }
-
- if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
- if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
- cgroup_enable_blkio_io =
- cgroup_enable_blkio_ops =
- cgroup_enable_blkio_throttle_io =
- cgroup_enable_blkio_throttle_ops =
- cgroup_enable_blkio_merged_ops =
- cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
- error("CGROUP: disabled blkio statistics.");
- }
- }
-
- if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
- if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
- cgroup_enable_memory =
- cgroup_enable_detailed_memory =
- cgroup_enable_swap =
- cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
- error("CGROUP: disabled memory statistics.");
- }
- }
-
- if(cgroup_search_in_devices) {
- if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
- cgroup_search_in_devices = 0;
- error("CGROUP: disabled devices statistics.");
- }
- }
- }
- else {
- if (find_dir_in_subdirs(cgroup_unified_base, NULL, found_subdir_in_dir) == -1) {
- cgroup_unified_exist = CONFIG_BOOLEAN_NO;
- error("CGROUP: disabled unified cgroups statistics.");
- }
- }
-
- // remove any non-existing cgroups
- cleanup_all_cgroups();
-
+static inline void update_filenames()
+{
struct cgroup *cg;
struct stat buf;
- for(cg = cgroup_root; cg ; cg = cg->next) {
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
// fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name);
if(unlikely(cg->pending_renames))
@@ -1748,12 +1934,175 @@ static inline void find_all_cgroups() {
else
debug(D_CGROUP, "memory.swap file for cgroup '%s': '%s' does not exist.", cg->id, filename);
}
+
+ if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->cpu_pressure.filename = strdupz(filename);
+ cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu;
+ cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO;
+ debug(D_CGROUP, "cpu.pressure filename for cgroup '%s': '%s'", cg->id, cg->cpu_pressure.filename);
+ } else {
+ debug(D_CGROUP, "cpu.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
+
+ if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->io_pressure.filename = strdupz(filename);
+ cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some;
+ cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full;
+ debug(D_CGROUP, "io.pressure filename for cgroup '%s': '%s'", cg->id, cg->io_pressure.filename);
+ } else {
+ debug(D_CGROUP, "io.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
+
+ if (unlikely((cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) && !cg->memory_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->memory_pressure.filename = strdupz(filename);
+ cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some;
+ cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full;
+ debug(D_CGROUP, "memory.pressure filename for cgroup '%s': '%s'", cg->id, cg->memory_pressure.filename);
+ } else {
+ debug(D_CGROUP, "memory.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
}
}
+}
+
+static inline void cleanup_all_cgroups() {
+ struct cgroup *cg = discovered_cgroup_root, *last = NULL;
+
+ for(; cg ;) {
+ if(!cg->available) {
+ // enable the first duplicate cgroup
+ {
+ struct cgroup *t;
+ for(t = discovered_cgroup_root; t ; t = t->discovered_next) {
+ if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
+ t->enabled = 1;
+ t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ break;
+ }
+ }
+ }
+
+ if(!last)
+ discovered_cgroup_root = cg->discovered_next;
+ else
+ last->discovered_next = cg->discovered_next;
+
+ cgroup_free(cg);
+
+ if(!last)
+ cg = discovered_cgroup_root;
+ else
+ cg = last->discovered_next;
+ }
+ else {
+ last = cg;
+ cg = cg->discovered_next;
+ }
+ }
+}
+
+static inline void copy_discovered_cgroups()
+{
+ debug(D_CGROUP, "copy discovered cgroups to the main group list");
+
+ struct cgroup *cg;
+
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ cg->next = cg->discovered_next;
+ }
+
+ cgroup_root = discovered_cgroup_root;
+}
+
+static inline void find_all_cgroups() {
+ debug(D_CGROUP, "searching for cgroups");
+
+ mark_all_cgroups_as_not_available();
+ if(!cgroup_use_unified_cgroups) {
+ if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
+ if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_cpuacct_stat =
+ cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled cpu statistics.");
+ }
+ }
+
+ if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
+ if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_blkio_io =
+ cgroup_enable_blkio_ops =
+ cgroup_enable_blkio_throttle_io =
+ cgroup_enable_blkio_throttle_ops =
+ cgroup_enable_blkio_merged_ops =
+ cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled blkio statistics.");
+ }
+ }
+
+ if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
+ if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_enable_memory =
+ cgroup_enable_detailed_memory =
+ cgroup_enable_swap =
+ cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled memory statistics.");
+ }
+ }
+
+ if(cgroup_search_in_devices) {
+ if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_search_in_devices = 0;
+ error("CGROUP: disabled devices statistics.");
+ }
+ }
+ }
+ else {
+ if (find_dir_in_subdirs(cgroup_unified_base, NULL, found_subdir_in_dir) == -1) {
+ cgroup_unified_exist = CONFIG_BOOLEAN_NO;
+ error("CGROUP: disabled unified cgroups statistics.");
+ }
+ }
+
+ update_filenames();
+
+ uv_mutex_lock(&cgroup_root_mutex);
+ cleanup_all_cgroups();
+ copy_discovered_cgroups();
+ uv_mutex_unlock(&cgroup_root_mutex);
debug(D_CGROUP, "done searching for cgroups");
}
+void cgroup_discovery_worker(void *ptr)
+{
+ UNUSED(ptr);
+
+ while (!netdata_exit) {
+ uv_mutex_lock(&discovery_thread.mutex);
+ while (!discovery_thread.start_discovery)
+ uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex);
+ discovery_thread.start_discovery = 0;
+ uv_mutex_unlock(&discovery_thread.mutex);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ find_all_cgroups();
+ }
+
+ discovery_thread.exited = 1;
+}
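+
+// A sketch of the hand-off around the worker above: the main plugin thread
+// (outside this excerpt) signals discovery_thread.cond_var on its schedule;
+// find_all_cgroups() rebuilds discovered_cgroup_root, and copy_discovered_cgroups()
+// publishes it to cgroup_root under cgroup_root_mutex, so the chart-updating
+// code always walks a consistent list.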
+
// ----------------------------------------------------------------------------
// generate charts
@@ -1807,7 +2156,7 @@ void update_systemd_services_charts(
if(likely(do_cpu)) {
if(unlikely(!st_cpu)) {
char title[CHART_TITLE_MAX + 1];
- snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (%d%% = %d core%s)", (processors * 100), processors, (processors > 1) ? "s" : "");
+ snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)");
st_cpu = rrdset_create_localhost(
"services"
@@ -2335,7 +2684,7 @@ void update_systemd_services_charts(
// update the values
struct cgroup *cg;
for(cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->available || !cg->enabled || cg->pending_renames || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))
+ if(unlikely(!cg->enabled || cg->pending_renames || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))
continue;
if(likely(do_cpu && cg->cpuacct_stat.updated)) {
@@ -2726,7 +3075,7 @@ void update_cgroup_charts(int update_every) {
struct cgroup *cg;
for(cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->available || !cg->enabled || cg->pending_renames))
+ if(unlikely(!cg->enabled || cg->pending_renames))
continue;
if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) {
@@ -2750,7 +3099,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_cpu)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (100%% = 1 core)");
cg->st_cpu = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -2766,6 +3115,9 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_STACKED
);
+
+ rrdset_update_labels(cg->st_cpu, cg->chart_labels);
+
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
@@ -2820,7 +3172,7 @@ void update_cgroup_charts(int update_every) {
rrdsetvar_custom_chart_variable_set(cg->chart_var_cpu_limit, value);
if(unlikely(!cg->st_cpu_limit)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits");
cg->st_cpu_limit = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -2837,6 +3189,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_LINE
);
+ rrdset_update_labels(cg->st_cpu_limit, cg->chart_labels);
+
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED))
rrddim_add(cg->st_cpu_limit, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE);
else
@@ -2873,7 +3227,7 @@ void update_cgroup_charts(int update_every) {
unsigned int i;
if(unlikely(!cg->st_cpu_per_core)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) Per Core for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "CPU Usage (100%% = 1 core) Per Core");
cg->st_cpu_per_core = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -2890,6 +3244,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_STACKED
);
+ rrdset_update_labels(cg->st_cpu_per_core, cg->chart_labels);
+
for(i = 0; i < cg->cpuacct_usage.cpus; i++) {
snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
@@ -2907,7 +3263,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_mem)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Usage for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Memory Usage");
cg->st_mem = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -2923,6 +3279,9 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_STACKED
);
+
+ rrdset_update_labels(cg->st_mem, cg->chart_labels);
+
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -2964,7 +3323,7 @@ void update_cgroup_charts(int update_every) {
rrdset_done(cg->st_mem);
if(unlikely(!cg->st_writeback)) {
- snprintfz(title, CHART_TITLE_MAX, "Writeback Memory for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Writeback Memory");
cg->st_writeback = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -2981,6 +3340,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_AREA
);
+ rrdset_update_labels(cg->st_writeback, cg->chart_labels);
+
if(cg->memory.detailed_has_dirty)
rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -2997,7 +3358,7 @@ void update_cgroup_charts(int update_every) {
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
if(unlikely(!cg->st_mem_activity)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Activity for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Memory Activity");
cg->st_mem_activity = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3014,6 +3375,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_LINE
);
+ rrdset_update_labels(cg->st_mem_activity, cg->chart_labels);
+
rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
}
@@ -3026,7 +3389,7 @@ void update_cgroup_charts(int update_every) {
}
if(unlikely(!cg->st_pgfaults)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults");
cg->st_pgfaults = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3043,6 +3406,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_LINE
);
+ rrdset_update_labels(cg->st_pgfaults, cg->chart_labels);
+
rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
}
@@ -3056,7 +3421,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_mem_usage)) {
- snprintfz(title, CHART_TITLE_MAX, "Used Memory %sfor cgroup %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Used Memory %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"");
cg->st_mem_usage = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3073,6 +3438,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_STACKED
);
+ rrdset_update_labels(cg->st_mem_usage, cg->chart_labels);
+
rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -3117,7 +3484,7 @@ void update_cgroup_charts(int update_every) {
memory_limit = cg->memory_limit;
if(unlikely(!cg->st_mem_usage_limit)) {
- snprintfz(title, CHART_TITLE_MAX, "Used RAM without Cache within the limits for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Used RAM without Cache within the limits");
cg->st_mem_usage_limit = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3134,6 +3501,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_STACKED
);
+ rrdset_update_labels(cg->st_mem_usage_limit, cg->chart_labels);
+
rrddim_add(cg->st_mem_usage_limit, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(cg->st_mem_usage_limit, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -3159,7 +3528,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_mem_failcnt)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures");
cg->st_mem_failcnt = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3175,6 +3544,8 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
+
+ rrdset_update_labels(cg->st_mem_failcnt, cg->chart_labels);
rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -3187,7 +3558,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_io)) {
- snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks)");
cg->st_io = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3204,6 +3575,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_AREA
);
+ rrdset_update_labels(cg->st_io, cg->chart_labels);
+
rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
}
@@ -3217,7 +3590,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_serviced_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks)");
cg->st_serviced_ops = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3234,6 +3607,8 @@ void update_cgroup_charts(int update_every) {
, RRDSET_TYPE_LINE
);
+ rrdset_update_labels(cg->st_serviced_ops, cg->chart_labels);
+
rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -3247,7 +3622,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_throttle_io)) {
- snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks)");
cg->st_throttle_io = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3263,6 +3638,8 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_AREA
);
+
+ rrdset_update_labels(cg->st_throttle_io, cg->chart_labels);
rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
@@ -3277,7 +3654,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_throttle_serviced_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks)");
cg->st_throttle_serviced_ops = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3293,6 +3670,8 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
+
+ rrdset_update_labels(cg->st_throttle_serviced_ops, cg->chart_labels);
rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -3307,7 +3686,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_queued_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks)");
cg->st_queued_ops = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3323,6 +3702,8 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
+
+ rrdset_update_labels(cg->st_queued_ops, cg->chart_labels);
rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -3337,7 +3718,7 @@ void update_cgroup_charts(int update_every) {
if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
if(unlikely(!cg->st_merged_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks) for cgroup %s", cg->chart_title);
+ snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks)");
cg->st_merged_ops = rrdset_create_localhost(
cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
@@ -3353,6 +3734,8 @@ void update_cgroup_charts(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
+
+ rrdset_update_labels(cg->st_merged_ops, cg->chart_labels);
rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
@@ -3364,6 +3747,171 @@ void update_cgroup_charts(int update_every) {
rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
rrdset_done(cg->st_merged_ops);
}
+
+ if (cg->options & CGROUP_OPTIONS_IS_UNIFIED) {
+ struct pressure *res = &cg->cpu_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "CPU pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "cpu_pressure"
+ , NULL
+ , "cpu"
+ , "cgroup.cpu_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2200
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ res = &cg->memory_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "Memory pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_pressure"
+ , NULL
+ , "mem"
+ , "cgroup.memory_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2300
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ if (likely(res->updated && res->full.enabled)) {
+ if (unlikely(!res->full.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "Memory full pressure");
+
+ chart = res->full.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "mem_full_pressure"
+ , NULL
+ , "mem"
+ , "cgroup.memory_full_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2350
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->full.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->full.st);
+ }
+
+ update_pressure_chart(&res->full);
+ }
+
+ res = &cg->io_pressure;
+ if (likely(res->updated && res->some.enabled)) {
+ if (unlikely(!res->some.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "I/O pressure");
+
+ chart = res->some.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io_pressure"
+ , NULL
+ , "disk"
+ , "cgroup.io_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2400
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->some.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->some.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->some.st);
+ }
+
+ update_pressure_chart(&res->some);
+ }
+
+ if (likely(res->updated && res->full.enabled)) {
+ if (unlikely(!res->full.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "I/O full pressure");
+
+ chart = res->full.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "io_full_pressure"
+ , NULL
+ , "disk"
+ , "cgroup.io_full_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2450
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_labels(chart, cg->chart_labels);
+
+ res->full.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ res->full.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(res->full.st);
+ }
+
+ update_pressure_chart(&res->full);
+ }
+ }
}
if(likely(cgroup_enable_systemd_services))
@@ -3385,6 +3933,21 @@ static void cgroup_main_cleanup(void *ptr) {
info("cleaning up...");
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+ if (!discovery_thread.exited) {
+ info("stopping discovery thread worker");
+ uv_mutex_unlock(&discovery_thread.mutex);
+ discovery_thread.start_discovery = 1;
+ uv_cond_signal(&discovery_thread.cond_var);
+ }
+
+ while (!discovery_thread.exited && max > 0) {
+ max -= step;
+ info("waiting for discovery thread to finish...");
+ sleep_usec(step);
+ }
+
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
@@ -3400,6 +3963,31 @@ void *cgroups_main(void *ptr) {
RRDSET *stcpu_thread = NULL;
+ if (uv_mutex_init(&cgroup_root_mutex)) {
+ error("CGROUP: cannot initialize mutex for the main cgroup list");
+ goto exit;
+ }
+
+ // dispatch a discovery worker thread
+ discovery_thread.start_discovery = 0;
+ discovery_thread.exited = 0;
+
+ if (uv_mutex_init(&discovery_thread.mutex)) {
+ error("CGROUP: cannot initialize mutex for discovery thread");
+ goto exit;
+ }
+ if (uv_cond_init(&discovery_thread.cond_var)) {
+ error("CGROUP: cannot initialize conditional variable for discovery thread");
+ goto exit;
+ }
+
+ int ret = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL);
+ if (ret) {
+ error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(ret));
+ goto exit;
+ }
+ uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]");
+
heartbeat_t hb;
heartbeat_init(&hb);
usec_t step = cgroup_update_every * USEC_PER_SEC;
@@ -3409,19 +3997,18 @@ void *cgroups_main(void *ptr) {
usec_t hb_dt = heartbeat_next(&hb, step);
if(unlikely(netdata_exit)) break;
- // BEGIN -- the job to be done
-
find_dt += hb_dt;
if(unlikely(find_dt >= find_every || cgroups_check)) {
- find_all_cgroups();
+ discovery_thread.start_discovery = 1;
+ uv_cond_signal(&discovery_thread.cond_var);
find_dt = 0;
cgroups_check = 0;
}
+ uv_mutex_lock(&cgroup_root_mutex);
read_all_cgroups(cgroup_root);
update_cgroup_charts(cgroup_update_every);
-
- // END -- the job is done
+ uv_mutex_unlock(&cgroup_root_mutex);
// --------------------------------------------------------------------
@@ -3457,6 +4044,7 @@ void *cgroups_main(void *ptr) {
}
}
+exit:
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
index 09ce5e3fb..155330ff1 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.h
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -28,4 +28,6 @@ extern void *cgroups_main(void *ptr);
#endif // (TARGET_OS == OS_LINUX)
+extern char *parse_k8s_data(struct label **labels, char *data);
+
#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
new file mode 100644
index 000000000..be8ea2c48
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_cgroups_plugin.h"
+#include "libnetdata/required_dummies.h"
+
+RRDHOST *localhost;
+int netdata_zero_metrics_enabled = 1;
+struct config netdata_config;
+char *netdata_configured_primary_plugins_dir = NULL;
+
+static void test_parse_k8s_data(void **state)
+{
+ UNUSED(state);
+
+ struct label *labels = (struct label *)0xff;
+
+ struct k8s_test_data {
+ char *data;
+ char *name;
+ char *key[3];
+ char *value[3];
+ };
+
+ struct k8s_test_data test_data[] = {
+ // One label
+ { .data = "name label1=\"value1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1" },
+
+ // Three labels
+ { .data = "name label1=\"value1\",label2=\"value2\",label3=\"value3\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1",
+ .key[1] = "label2", .value[1] = "value2",
+ .key[2] = "label3", .value[2] = "value3" },
+
+ // Comma at the end of the data string
+ { .data = "name label1=\"value1\",",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value1" },
+
+ // Equals sign in the value
+ { .data = "name label1=\"value=1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value=1" },
+
+ // Double quotation mark in the value
+ { .data = "name label1=\"value\"1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value" },
+
+ // Escaped double quotation mark in the value
+ { .data = "name label1=\"value\\\"1\"",
+ .name = "name",
+ .key[0] = "label1", .value[0] = "value\\\"1" },
+
+ // Equals sign in the key
+ { .data = "name label=1=\"value1\"",
+ .name = "name",
+ .key[0] = "label", .value[0] = "1=\"value1\"" },
+
+ // Skipped value
+ { .data = "name label1=,label2=\"value2\"",
+ .name = "name",
+ .key[0] = "label2", .value[0] = "value2" },
+
+ // A pair of equals signs
+ { .data = "name= =",
+ .name = "name=" },
+
+ // A pair of commas
+ { .data = "name, ,",
+ .name = "name," },
+
+ { .data = NULL }
+ };
+
+ for (int i = 0; test_data[i].data != NULL; i++) {
+ char *data = strdup(test_data[i].data);
+
+ for (int l = 0; l < 3 && test_data[i].key[l] != NULL; l++) {
+ char *key = test_data[i].key[l];
+ char *value = test_data[i].value[l];
+
+ expect_function_call(__wrap_add_label_to_list);
+ expect_value(__wrap_add_label_to_list, l, 0xff);
+ expect_string(__wrap_add_label_to_list, key, key);
+ expect_string(__wrap_add_label_to_list, value, value);
+ expect_value(__wrap_add_label_to_list, label_source, LABEL_SOURCE_KUBERNETES);
+ }
+
+ char *name = parse_k8s_data(&labels, data);
+
+ assert_string_equal(name, test_data[i].name);
+ assert_ptr_equal(labels, 0xff);
+
+ free(data);
+ }
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(test_parse_k8s_data),
+ };
+
+ int test_res = cmocka_run_group_tests_name("test_parse_k8s_data", tests, NULL, NULL);
+
+ return test_res;
+}
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.h b/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
new file mode 100644
index 000000000..3d68e9230
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef TEST_CGROUPS_PLUGIN_H
+#define TEST_CGROUPS_PLUGIN_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#include "../sys_fs_cgroup.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <stdint.h>
+#include <cmocka.h>
+
+#endif /* TEST_CGROUPS_PLUGIN_H */
diff --git a/collectors/cgroups.plugin/tests/test_doubles.c b/collectors/cgroups.plugin/tests/test_doubles.c
new file mode 100644
index 000000000..5572fb2f5
--- /dev/null
+++ b/collectors/cgroups.plugin/tests/test_doubles.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_cgroups_plugin.h"
+
+void rrdset_is_obsolete(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+void rrdset_isnot_obsolete(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+struct mountinfo *mountinfo_read(int do_statvfs)
+{
+ UNUSED(do_statvfs);
+
+ return NULL;
+}
+
+struct mountinfo *
+mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source)
+{
+ UNUSED(root);
+ UNUSED(filesystem);
+ UNUSED(mount_source);
+
+ return NULL;
+}
+
+struct mountinfo *
+mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options)
+{
+ UNUSED(root);
+ UNUSED(filesystem);
+ UNUSED(super_options);
+
+ return NULL;
+}
+
+void mountinfo_free_all(struct mountinfo *mi)
+{
+ UNUSED(mi);
+}
+
+struct label *__wrap_add_label_to_list(struct label *l, char *key, char *value, LABEL_SOURCE label_source)
+{
+ function_called();
+ check_expected_ptr(l);
+ check_expected_ptr(key);
+ check_expected_ptr(value);
+ check_expected(label_source);
+ return l;
+}
+
+void rrdset_update_labels(RRDSET *st, struct label *labels)
+{
+ UNUSED(st);
+ UNUSED(labels);
+}
+
+RRDSET *rrdset_create_custom(
+ RRDHOST *host, const char *type, const char *id, const char *name, const char *family, const char *context,
+ const char *title, const char *units, const char *plugin, const char *module, long priority, int update_every,
+ RRDSET_TYPE chart_type, RRD_MEMORY_MODE memory_mode, long history_entries)
+{
+ UNUSED(host);
+ UNUSED(type);
+ UNUSED(id);
+ UNUSED(name);
+ UNUSED(family);
+ UNUSED(context);
+ UNUSED(title);
+ UNUSED(units);
+ UNUSED(plugin);
+ UNUSED(module);
+ UNUSED(priority);
+ UNUSED(update_every);
+ UNUSED(chart_type);
+ UNUSED(memory_mode);
+ UNUSED(history_entries);
+
+ return NULL;
+}
+
+RRDDIM *rrddim_add_custom(
+ RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor,
+ RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode)
+{
+ UNUSED(st);
+ UNUSED(id);
+ UNUSED(name);
+ UNUSED(multiplier);
+ UNUSED(divisor);
+ UNUSED(algorithm);
+ UNUSED(memory_mode);
+
+ return NULL;
+}
+
+collected_number rrddim_set(RRDSET *st, const char *id, collected_number value)
+{
+ UNUSED(st);
+ UNUSED(id);
+ UNUSED(value);
+
+ return 0;
+}
+
+collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
+{
+ UNUSED(st);
+ UNUSED(rd);
+ UNUSED(value);
+
+ return 0;
+}
+
+RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name)
+{
+ UNUSED(st);
+ UNUSED(name);
+
+ return NULL;
+}
+
+void rrdsetvar_custom_chart_variable_set(RRDSETVAR *rs, calculated_number value)
+{
+ UNUSED(rs);
+ UNUSED(value);
+}
+
+void rrdset_next_usec(RRDSET *st, usec_t microseconds)
+{
+ UNUSED(st);
+ UNUSED(microseconds);
+}
+
+void rrdset_done(RRDSET *st)
+{
+ UNUSED(st);
+}
+
+void update_pressure_chart(struct pressure_chart *chart)
+{
+ UNUSED(chart);
+}
+
+void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels)
+{
+ UNUSED(host_device);
+ UNUSED(container_device);
+ UNUSED(container_name);
+ UNUSED(labels);
+}
+
+void netdev_rename_device_del(const char *host_device)
+{
+ UNUSED(host_device);
+}
diff --git a/collectors/charts.d.plugin/.keep b/collectors/charts.d.plugin/.keep
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/charts.d.plugin/.keep
+++ /dev/null
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
index b3b2fb927..03c7f0a94 100644
--- a/collectors/charts.d.plugin/Makefile.am
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -31,7 +31,6 @@ dist_charts_DATA = \
userchartsconfigdir=$(configdir)/charts.d
dist_userchartsconfig_DATA = \
- .keep \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
@@ -43,22 +42,9 @@ dist_chartsconfig_DATA = \
$(NULL)
include ap/Makefile.inc
-include apache/Makefile.inc
include apcupsd/Makefile.inc
-include cpu_apps/Makefile.inc
-include cpufreq/Makefile.inc
include example/Makefile.inc
-include exim/Makefile.inc
-include hddtemp/Makefile.inc
include libreswan/Makefile.inc
-include load_average/Makefile.inc
-include mem_apps/Makefile.inc
-include mysql/Makefile.inc
-include nginx/Makefile.inc
include nut/Makefile.inc
include opensips/Makefile.inc
-include phpfpm/Makefile.inc
-include postfix/Makefile.inc
include sensors/Makefile.inc
-include squid/Makefile.inc
-include tomcat/Makefile.inc
diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in
deleted file mode 100644
index dd19ca016..000000000
--- a/collectors/charts.d.plugin/Makefile.in
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/charts.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_charts_SCRIPTS) \
- $(dist_plugins_SCRIPTS) $(dist_charts_DATA) \
- $(dist_chartsconfig_DATA) $(dist_libconfig_DATA) \
- $(dist_noinst_DATA) $(dist_userchartsconfig_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"
-SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) \
- $(dist_userchartsconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/ap/Makefile.inc \
- $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \
- $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
- $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
- $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/libreswan/Makefile.inc \
- $(srcdir)/load_average/Makefile.inc \
- $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \
- $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \
- $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \
- $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- charts.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- charts.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- loopsleepms.sh.inc \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \
- ap/Makefile.inc apache/README.md apache/Makefile.inc \
- apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \
- cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \
- example/README.md example/Makefile.inc exim/README.md \
- exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
- libreswan/README.md libreswan/Makefile.inc \
- load_average/README.md load_average/Makefile.inc \
- mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nut/README.md nut/Makefile.inc opensips/README.md \
- opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
- postfix/README.md postfix/Makefile.inc sensors/README.md \
- sensors/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc
-dist_charts_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \
- apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \
- cpufreq/cpufreq.chart.sh example/example.chart.sh \
- exim/exim.chart.sh hddtemp/hddtemp.chart.sh \
- libreswan/libreswan.chart.sh \
- load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \
- mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \
- opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \
- postfix/postfix.chart.sh sensors/sensors.chart.sh \
- squid/squid.chart.sh tomcat/tomcat.chart.sh
-userchartsconfigdir = $(configdir)/charts.d
-dist_userchartsconfig_DATA = \
- .keep \
- $(NULL)
-
-chartsconfigdir = $(libconfigdir)/charts.d
-dist_chartsconfig_DATA = $(NULL) ap/ap.conf apache/apache.conf \
- apcupsd/apcupsd.conf cpu_apps/cpu_apps.conf \
- cpufreq/cpufreq.conf example/example.conf exim/exim.conf \
- hddtemp/hddtemp.conf libreswan/libreswan.conf \
- load_average/load_average.conf mem_apps/mem_apps.conf \
- mysql/mysql.conf nginx/nginx.conf nut/nut.conf \
- opensips/opensips.conf phpfpm/phpfpm.conf postfix/postfix.conf \
- sensors/sensors.conf squid/squid.conf tomcat/tomcat.conf
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_chartsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_chartsDATA: $(dist_charts_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \
- done
-
-uninstall-dist_chartsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir)
-install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_chartsconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userchartsconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userchartsconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \
- install-dist_chartsconfigDATA install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \
- uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_userchartsconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_chartsDATA \
- install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_userchartsconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-exec-local install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_chartsDATA \
- uninstall-dist_chartsSCRIPTS uninstall-dist_chartsconfigDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_userchartsconfigDATA
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userchartsconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
index 6bd115f30..4a7911a60 100644
--- a/collectors/charts.d.plugin/README.md
+++ b/collectors/charts.d.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "charts.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/README.md
+-->
+
# charts.d.plugin
`charts.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
@@ -55,11 +60,11 @@ For a module called `X`, the following criteria must be met:
the collector cannot be used).
- `X_create()` - creates the Netdata charts, following the standard Netdata plugin guides as described in
- **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+ **[External Plugins](/collectors/plugins.d/README.md)** (commands `CHART` and `DIMENSION`).
The return value does matter: 0 = OK, 1 = FAILED.
- `X_update()` - collects the values for the defined charts, following the standard Netdata plugin guides
- as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+ as described in **[External Plugins](/collectors/plugins.d/README.md)** (commands `BEGIN`, `SET`, `END`).
The return value also matters: 0 = OK, 1 = FAILED.
5. The following global variables are available to be set:
@@ -67,7 +72,7 @@ For a module called `X`, the following criteria must be met:
The module script may use more functions or variables. But all of them must begin with `X_`.
-The standard Netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
+The standard Netdata plugin variables are also available (check **[External Plugins](/collectors/plugins.d/README.md)**).
### X_check()
@@ -81,7 +86,7 @@ connect to a local mysql database to find out if it can read the values it needs
### X_create()
The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard Netdata
-plugin guides (**[External Plugins](../plugins.d/)**).
+plugin guides (**[External Plugins](/collectors/plugins.d/README.md)**).
`X_create()` will be called just once and only after `X_check()` was successful.
You can however call it yourself when there is need for it (for example to add a new dimension to an existing chart).
@@ -91,7 +96,7 @@ A non-zero return value will disable the collector.
### X_update()
`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to Netdata,
-following the Netdata plugin guides (**[External Plugins](../plugins.d/)**).
+following the Netdata plugin guides (**[External Plugins](/collectors/plugins.d/README.md)**).
The function will be called with one parameter: microseconds since the last time it was run. This value should be
appended to the `BEGIN` statement of every chart updated by the collector script.
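For clarity, a minimal sketch of such a module follows. This is a hypothetical `example2` module; the names, chart IDs, and values are illustrative only and not part of the shipped collectors:

```sh
# example2.chart.sh - hypothetical minimal charts.d module (sketch only)

# _update_every holds the seconds between _update() calls; empty inherits the global
example2_update_every=
example2_priority=150000

example2_check() {
  # return 0 to enable the module, 1 to disable it
  return 0
}

example2_create() {
  # emit CHART and DIMENSION lines, as described in External Plugins
  cat << EOF
CHART example2.random '' "A random number" "value" random example2.random line $((example2_priority)) $example2_update_every
DIMENSION random '' absolute 1 1
EOF
  return 0
}

example2_update() {
  # $1 is the microseconds since the last run; append it to the BEGIN statement
  cat << EOF
BEGIN example2.random $1
SET random = ${RANDOM}
END
EOF
  return 0
}
```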
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
index befe21eec..35a00d65d 100644
--- a/collectors/charts.d.plugin/ap/README.md
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -1,4 +1,10 @@
-# Access Point Plugin (ap)
+<!--
+title: "Access point monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/ap/README.md
+sidebar_label: "Access points"
+-->
+
+# Access point monitoring with Netdata
The `ap` collector visualizes data related to access points.
@@ -76,8 +82,15 @@ Station 40:b8:37:5a:ed:5e (on wlan0)
## Configuration
-You can only set `ap_update_every=NUMBER` to `/etc/netdata/charts.d/ap.conf`, to give the data collection frequency.
-To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`.
+Edit the `charts.d/ap.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/ap.conf
+```
+
+You can only set `ap_update_every=NUMBER` to change the data collection frequency.
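In practice the file may contain nothing more than a single assignment; for instance, assuming a 5-second frequency is wanted:

```sh
# charts.d/ap.conf (example value)
ap_update_every=5
```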
## Auto-detection
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
index a2d04c0a7..5dd787835 100644
--- a/collectors/charts.d.plugin/ap/ap.chart.sh
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
@@ -16,9 +16,9 @@ declare -A ap_devs=()
# _check is called once, to find out if this chart should be enabled or not
ap_check() {
- require_cmd iw || return 1
- local ev
- ev=$(run iw dev | awk '
+ require_cmd iw || return 1
+ local ev
+ ev=$(run iw dev | awk '
BEGIN {
i = "";
ssid = "";
@@ -41,26 +41,26 @@ ap_check() {
}
}
')
- eval "${ev}"
+ eval "${ev}"
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
- [ ${#ap_devs[@]} -gt 0 ] && return 0
- error "no devices found in AP mode, with 'iw dev'"
- return 1
+ [ ${#ap_devs[@]} -gt 0 ] && return 0
+ error "no devices found in AP mode, with 'iw dev'"
+ return 1
}
# _create is called once, to create the charts
ap_create() {
- local ssid dev
+ local ssid dev
- for dev in "${!ap_devs[@]}"; do
- ssid="${ap_devs[${dev}]}"
+ for dev in "${!ap_devs[@]}"; do
+ ssid="${ap_devs[${dev}]}"
- # create the chart with 3 dimensions
- cat <<EOF
+ # create the chart with 3 dimensions
+ cat << EOF
CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every
DIMENSION clients '' absolute 1 1
@@ -84,25 +84,25 @@ DIMENSION receive '' absolute 1 1000
DIMENSION transmit '' absolute -1 1000
DIMENSION expected 'expected throughput' absolute 1 1000
EOF
- done
+ done
- return 0
+ return 0
}
# _update is called continuously, to collect the values
ap_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- for dev in "${!ap_devs[@]}"; do
- echo
- echo "DEVICE ${dev}"
- iw "${dev}" station dump
- done | awk '
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
+
+ for dev in "${!ap_devs[@]}"; do
+ echo
+ echo "DEVICE ${dev}"
+ iw "${dev}" station dump
+ done | awk '
function zero_data() {
dev = "";
c = 0;
@@ -175,5 +175,5 @@ ap_update() {
}
'
- return 0
+ return 0
}
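A module like this can be verified by hand: per the orchestrator's parameter parsing, naming a module on the command line runs only that module with debugging enabled. The plugin path below assumes a standard install and may differ on your system:

```sh
# run only the ap module, with debug output, at a 1-second frequency
sudo /usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 ap
```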
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
deleted file mode 100644
index 53f02a5b8..000000000
--- a/collectors/charts.d.plugin/apache/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Apache
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/apache) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
----
-
-The `apache` collector visualizes key performance data for an apache web server.
-
-## Example Netdata charts
-
-For apache 2.2:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/12530273/421c4d14-c1e2-11e5-9fb6-ca6d6dd3b1dd.png)
-
-For apache 2.4:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/12530376/29ec26de-c1e6-11e5-9af1-e48aaf781795.png)
-
-## How it works
-
-It runs `curl "http://apache.host/server-status?auto"` to fetch the current status of apache.
-
-It has been tested with apache 2.2 and apache 2.4. The latter also provides connections information (total and break down by status).
-
-Apache 2.2 response:
-
-```sh
-curl "http://127.0.0.1/server-status?auto"
-Total Accesses: 80057
-Total kBytes: 223017
-CPULoad: .018287
-Uptime: 64472
-ReqPerSec: 1.24173
-BytesPerSec: 3542.15
-BytesPerReq: 2852.59
-BusyWorkers: 1
-IdleWorkers: 49
-Scoreboard: _________________________......................................._W_______________________.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
-```
-
-Apache 2.4 response:
-
-```sh
-curl "http://127.0.0.1/server-status?auto"
-127.0.0.1
-ServerVersion: Apache/2.4.18 (Unix)
-ServerMPM: event
-Server Built: Dec 14 2015 08:05:54
-CurrentTime: Saturday, 23-Jan-2016 14:42:06 EET
-RestartTime: Saturday, 23-Jan-2016 04:57:13 EET
-ParentServerConfigGeneration: 2
-ParentServerMPMGeneration: 1
-ServerUptimeSeconds: 35092
-ServerUptime: 9 hours 44 minutes 52 seconds
-Load1: 0.32
-Load5: 0.32
-Load15: 0.27
-Total Accesses: 32403
-Total kBytes: 34464
-CPUUser: 30.37
-CPUSystem: 29.55
-CPUChildrenUser: 0
-CPUChildrenSystem: 0
-CPULoad: .170751
-Uptime: 35092
-ReqPerSec: .923373
-BytesPerSec: 1005.67
-BytesPerReq: 1089.13
-BusyWorkers: 1
-IdleWorkers: 99
-ConnsTotal: 0
-ConnsAsyncWriting: 0
-ConnsAsyncKeepAlive: 0
-ConnsAsyncClosing: 0
-Scoreboard: __________________________________________________________________________________________W_________............................................................................................................................................................................................................................................................................................................
-```
-
-From the apache status output it collects:
-
-- total accesses (incremental value, rendered as requests/s)
-- total bandwidth (incremental value, rendered as bandwidth/s)
-- requests per second (this appears to be calculated by apache as an average for its lifetime, while the one calculated by Netdata using the total accesses counter is real-time)
-- bytes per second (average for the lifetime of the apache server)
-- bytes per request (average for the lifetime of the apache server)
-- workers by status (`busy` and `idle`)
-- total connections (currently active connections - offered by apache 2.4+)
-- async connections per status (`keepalive`, `writing`, `closing` - offered by apache 2.4+)
-
-## Configuration
-
-The configuration is stored in `/etc/netdata/charts.d/apache.conf`.
-To edit this file on your system run `/etc/netdata/edit-config charts.d/apache.conf`.
-
-The internal default is:
-
-```sh
-# the URL your apache server is responding with mod_status information.
-apache_url="http://127.0.0.1:80/server-status?auto"
-
-# use this to set custom curl options you may need
-apache_curl_opts=
-
-# set this to a NUMBER to overwrite the update frequency
-# it is in seconds
-apache_update_every=
-```
-
-The default `apache_update_every` is configured in Netdata.
-
-## Auto-detection
-
-If you have configured your apache server to offer server-status information to localhost clients, the defaults should work fine.
-
-## Apache Configuration
-
-Apache configuration differs between distributions. Please check your distribution's documentation for information on enabling apache's `mod_status` module.
-
-If you are able to successfully run this command by hand:
-
-```sh
-curl "http://127.0.0.1:80/server-status?auto"
-```
-
-Netdata will be able to do it too.
-
-Notice: You may need to have the default `000-default.conf` website enabled in order for `mod_status` to work.
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
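As a quick sanity check of the status output this removed module parsed, a single field can be pulled out with awk; this one-liner is a sketch, not part of the module:

```sh
curl -s "http://127.0.0.1:80/server-status?auto" | awk -F': ' '/^BusyWorkers/ { print $2 }'
```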
diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
deleted file mode 100644
index 7d09ee676..000000000
--- a/collectors/charts.d.plugin/apache/apache.chart.sh
+++ /dev/null
@@ -1,251 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# the URL to download apache status info
-apache_url="http://127.0.0.1:80/server-status?auto"
-apache_curl_opts=
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-apache_update_every=
-
-apache_priority=60000
-
-# convert apache floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-apache_decimal_detail=1000000
-
-declare -a apache_response=()
-apache_accesses=0
-apache_kbytes=0
-apache_reqpersec=0
-apache_bytespersec=0
-apache_bytesperreq=0
-apache_busyworkers=0
-apache_idleworkers=0
-apache_connstotal=0
-apache_connsasyncwriting=0
-apache_connsasynckeepalive=0
-apache_connsasyncclosing=0
-
-apache_keys_detected=0
-apache_has_conns=0
-apache_key_accesses=
-apache_key_kbytes=
-apache_key_reqpersec=
-apache_key_bytespersec=
-apache_key_bytesperreq=
-apache_key_busyworkers=
-apache_key_idleworkers=
-apache_key_scoreboard=
-apache_key_connstotal=
-apache_key_connsasyncwriting=
-apache_key_connsasynckeepalive=
-apache_key_connsasyncclosing=
-apache_detect() {
- local i=0
- for x in "${@}"; do
- case "${x}" in
- 'Total Accesses') apache_key_accesses=$((i + 1)) ;;
- 'Total kBytes') apache_key_kbytes=$((i + 1)) ;;
- 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;;
- 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;;
- 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;;
- 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;;
- 'IdleWorkers') apache_key_idleworkers=$((i + 1)) ;;
- 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;;
- 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;;
- 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;;
- 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;;
- 'Scoreboard') apache_key_scoreboard=$((i)) ;;
- esac
-
- i=$((i + 1))
- done
-
- # we will not check for the Conns*
- # keys, since these are apache 2.4 specific
- [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1
- [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1
- [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1
- [ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1
- [ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1
- [ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1
- [ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1
- [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1
-
- if [ ! -z "${apache_key_connstotal}" ] &&
- [ ! -z "${apache_key_connsasyncwriting}" ] &&
- [ ! -z "${apache_key_connsasynckeepalive}" ] &&
- [ ! -z "${apache_key_connsasyncclosing}" ]; then
- apache_has_conns=1
- else
- apache_has_conns=0
- fi
-
- return 0
-}
-
-apache_get() {
- local oIFS="${IFS}" ret
- # shellcheck disable=2207
- IFS=$':\n' apache_response=($(run curl -Ss ${apache_curl_opts} "${apache_url}"))
- ret=$?
- IFS="${oIFS}"
-
- if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]; then
- return 1
- fi
-
- # the last line on the apache output is "Scoreboard"
- # we use this label to detect that the output has a new word count
- if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]; then
- apache_detect "${apache_response[@]}" || return 1
- apache_keys_detected=1
- fi
-
- apache_accesses="${apache_response[${apache_key_accesses}]}"
- apache_kbytes="${apache_response[${apache_key_kbytes}]}"
-
- float2int "${apache_response[${apache_key_reqpersec}]}" ${apache_decimal_detail}
- apache_reqpersec=${FLOAT2INT_RESULT}
-
- float2int "${apache_response[${apache_key_bytespersec}]}" ${apache_decimal_detail}
- apache_bytespersec=${FLOAT2INT_RESULT}
-
- float2int "${apache_response[${apache_key_bytesperreq}]}" ${apache_decimal_detail}
- apache_bytesperreq=${FLOAT2INT_RESULT}
-
- apache_busyworkers="${apache_response[${apache_key_busyworkers}]}"
- apache_idleworkers="${apache_response[${apache_key_idleworkers}]}"
-
- if
- [ -z "${apache_accesses}" ] ||
- [ -z "${apache_kbytes}" ] ||
- [ -z "${apache_reqpersec}" ] ||
- [ -z "${apache_bytespersec}" ] ||
- [ -z "${apache_bytesperreq}" ] ||
- [ -z "${apache_busyworkers}" ]
- [ -z "${apache_idleworkers}" ]
- then
- error "empty values got from apache server: ${apache_response[*]}"
- return 1
- fi
-
- if [ ${apache_has_conns} -eq 1 ]; then
- apache_connstotal="${apache_response[${apache_key_connstotal}]}"
- apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}"
- apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}"
- apache_connsasyncclosing="${apache_response[${apache_key_connsasyncclosing}]}"
- fi
-
- return 0
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-apache_check() {
-
- apache_get
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- # shellcheck disable=2154
- error "cannot find stub_status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf"
- return 1
- fi
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- return 0
-}
-
-# _create is called once, to create the charts
-apache_create() {
- cat <<EOF
-CHART apache_local.bytesperreq '' "apache Lifetime Avg. Response Size" "bytes/request" statistics apache.bytesperreq area $((apache_priority + 8)) $apache_update_every
-DIMENSION size '' absolute 1 ${apache_decimal_detail}
-CHART apache_local.workers '' "apache Workers" "workers" workers apache.workers stacked $((apache_priority + 5)) $apache_update_every
-DIMENSION idle '' absolute 1 1
-DIMENSION busy '' absolute 1 1
-CHART apache_local.reqpersec '' "apache Lifetime Avg. Requests/s" "requests/s" statistics apache.reqpersec line $((apache_priority + 6)) $apache_update_every
-DIMENSION requests '' absolute 1 ${apache_decimal_detail}
-CHART apache_local.bytespersec '' "apache Lifetime Avg. Bandwidth/s" "kilobits/s" statistics apache.bytespersec area $((apache_priority + 7)) $apache_update_every
-DIMENSION sent '' absolute 8 $((apache_decimal_detail * 1000))
-CHART apache_local.requests '' "apache Requests" "requests/s" requests apache.requests line $((apache_priority + 1)) $apache_update_every
-DIMENSION requests '' incremental 1 1
-CHART apache_local.net '' "apache Bandwidth" "kilobits/s" bandwidth apache.net area $((apache_priority + 3)) $apache_update_every
-DIMENSION sent '' incremental 8 1
-EOF
-
- if [ ${apache_has_conns} -eq 1 ]; then
- cat <<EOF2
-CHART apache_local.connections '' "apache Connections" "connections" connections apache.connections line $((apache_priority + 2)) $apache_update_every
-DIMENSION connections '' absolute 1 1
-CHART apache_local.conns_async '' "apache Async Connections" "connections" connections apache.conns_async stacked $((apache_priority + 4)) $apache_update_every
-DIMENSION keepalive '' absolute 1 1
-DIMENSION closing '' absolute 1 1
-DIMENSION writing '' absolute 1 1
-EOF2
- fi
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-apache_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- apache_get || return 1
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN apache_local.requests $1
-SET requests = $((apache_accesses))
-END
-BEGIN apache_local.net $1
-SET sent = $((apache_kbytes))
-END
-BEGIN apache_local.reqpersec $1
-SET requests = $((apache_reqpersec))
-END
-BEGIN apache_local.bytespersec $1
-SET sent = $((apache_bytespersec))
-END
-BEGIN apache_local.bytesperreq $1
-SET size = $((apache_bytesperreq))
-END
-BEGIN apache_local.workers $1
-SET idle = $((apache_idleworkers))
-SET busy = $((apache_busyworkers))
-END
-VALUESEOF
-
- if [ ${apache_has_conns} -eq 1 ]; then
- cat <<VALUESEOF2
-BEGIN apache_local.connections $1
-SET connections = $((apache_connstotal))
-END
-BEGIN apache_local.conns_async $1
-SET keepalive = $((apache_connsasynckeepalive))
-SET closing = $((apache_connsasyncclosing))
-SET writing = $((apache_connsasyncwriting))
-END
-VALUESEOF2
- fi
-
- return 0
-}
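The removed `apache_detect()`/`apache_get()` pair relied on splitting the whole status response into a word array on `:` and newline, then remembering the index of each key. A stripped-down sketch of that idea, with fake sample input:

```sh
# sketch of the index-detection idea used by apache_detect() (sample input is fake)
response="BusyWorkers: 1
IdleWorkers: 49"
oIFS="${IFS}"
IFS=$':\n'
# shellcheck disable=SC2206
words=(${response})
IFS="${oIFS}"
i=0
busy=
for w in "${words[@]}"; do
  [ "${w}" = "BusyWorkers" ] && busy=$((i + 1))
  i=$((i + 1))
done
echo "busy workers:${words[${busy}]}"   # prints: busy workers: 1
```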
diff --git a/collectors/charts.d.plugin/apache/apache.conf b/collectors/charts.d.plugin/apache/apache.conf
deleted file mode 100644
index 50914cf32..000000000
--- a/collectors/charts.d.plugin/apache/apache.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the URL to download apache status info
-#apache_url="http://127.0.0.1:80/server-status?auto"
-#apache_curl_opts=
-
-# convert apache floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-#apache_decimal_detail=1000000
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#apache_update_every=
-
-# the charts priority on the dashboard
-#apache_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apache_retries=10
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
index 51bb6eccd..b5b41e84d 100644
--- a/collectors/charts.d.plugin/apcupsd/README.md
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -1,7 +1,21 @@
-# apcupsd
+<!--
+title: "APC UPS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/apcupsd/README.md
+sidebar_label: "APC UPS"
+-->
-_Under construction_
+# APC UPS monitoring with Netdata
-Collects UPS metrics
+Monitors different APC UPS models and retrieves status information using the `apcaccess` tool.
+
+## Configuration
+
+Edit the `charts.d/apcupsd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/apcupsd.conf
+```
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapcupsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
index 31ff93160..014a9c1de 100644
--- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
@@ -11,7 +11,7 @@ apcupsd_ip=
apcupsd_port=
declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551"
+ ["local"]="127.0.0.1:3551"
)
# how frequently to collect UPS data
@@ -23,55 +23,63 @@ apcupsd_timeout=3
apcupsd_priority=90000
apcupsd_get() {
- run -t $apcupsd_timeout apcaccess status "$1"
+ run -t $apcupsd_timeout apcaccess status "$1"
+}
+
+is_ups_alive() {
+ local status
+ status="$(apcupsd_get "$1" | sed -e 's/STATUS.*: //' -e 't' -e 'd')"
+ case "$status" in
+ "" | "COMMLOST" | "SHUTTING DOWN") return 1 ;;
+ *) return 0 ;;
+ esac
}
apcupsd_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- require_cmd apcaccess || return 1
-
- # backwards compatibility
- if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
- apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
- fi
-
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
- failed=$((failed + 1))
- else
- apcupsd_status="$(apcupsd_get ${apcupsd_sources[${host}]} | awk '/^STATUS.*/{ print $3 }')"
- if [ "${apcupsd_status}" != "ONLINE" ] && [ "${apcupsd_status}" != "ONBATT" ]; then
- error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
- failed=$((failed + 1))
- else
- working=$((working + 1))
- fi
- fi
- done
-
- if [ ${working} -eq 0 ]; then
- error "No APC UPSes found available."
- return 1
- fi
-
- return 0
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd apcaccess || return 1
+
+ # backwards compatibility
+ if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
+ apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
+ fi
+
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ run apcupsd_get "${apcupsd_sources[${host}]}" > /dev/null
+ # shellcheck disable=2181
+ if [ $? -ne 0 ]; then
+ error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
+ failed=$((failed + 1))
+ else
+ if ! is_ups_alive ${apcupsd_sources[${host}]}; then
+ error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
+ failed=$((failed + 1))
+ else
+ working=$((working + 1))
+ fi
+ fi
+ done
+
+ if [ ${working} -eq 0 ]; then
+ error "No APC UPSes found available."
+ return 1
+ fi
+
+ return 0
}
apcupsd_create() {
- local host src
- for host in "${!apcupsd_sources[@]}"; do
- src=${apcupsd_sources[${host}]}
+ local host src
+ for host in "${!apcupsd_sources[@]}"; do
+ src=${apcupsd_sources[${host}]}
- # create the charts
- cat <<EOF
+ # create the charts
+ cat << EOF
CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every
DIMENSION battery_charge charge absolute 1 100
@@ -104,21 +112,21 @@ CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean
DIMENSION online online absolute 0 1
EOF
- done
- return 0
+ done
+ return 0
}
apcupsd_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- apcupsd_get "${apcupsd_sources[${host}]}" | awk "
+ local host working=0 failed=0
+ for host in "${!apcupsd_sources[@]}"; do
+ apcupsd_get "${apcupsd_sources[${host}]}" | awk "
BEGIN {
battery_charge = 0;
@@ -190,16 +198,16 @@ END {
print \"END\"
}
}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- failed=$((failed + 1))
- error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
- else
- working=$((working + 1))
- fi
- done
-
- [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
-
- return 0
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]; then
+ failed=$((failed + 1))
+ error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
+ else
+ working=$((working + 1))
+ fi
+ done
+
+ [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
+
+ return 0
}
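The new `is_ups_alive()` above keys off the `STATUS` line of `apcaccess` output; its sed expression prints the status value and deletes every other line, which can be exercised on canned (fake) output:

```sh
# exercise the STATUS extraction on sample apcaccess output
sample="DATE     : 2021-01-01 00:00:00 +0000
STATUS   : ONLINE
LOADPCT  : 12.0 Percent"
echo "${sample}" | sed -e 's/STATUS.*: //' -e 't' -e 'd'   # prints: ONLINE
```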
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
index 94c40cf6f..d6add5e5b 100644
--- a/collectors/charts.d.plugin/charts.d.conf
+++ b/collectors/charts.d.plugin/charts.d.conf
@@ -45,21 +45,3 @@
# Nothing useful.
# Just an example charts.d plugin you can use as a template.
# example=force
-
-# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
-# apache=force
-# cpufreq=force
-# exim=force
-# hddtemp=force
-# mysql=force
-# nginx=force
-# phpfpm=force
-# postfix=force
-# sensors=force
-# squid=force
-# tomcat=force
-
-# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
-# cpu_apps=force
-# mem_apps=force
-# load_average=force
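For reference, enabling or disabling a module in this file is one assignment per module; the values below are examples only:

```sh
# force-enable the example module, explicitly disable ap
example=force
ap=no
```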
diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin
deleted file mode 100644
index 40dc48c15..000000000
--- a/collectors/charts.d.plugin/charts.d.plugin
+++ /dev/null
@@ -1,698 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# charts.d.plugin allows easy development of BASH plugins
-#
-# if you need to run parallel charts.d processes, link this file to a different name
-# in the same directory, with a .plugin suffix and netdata will start both of them,
-# each will have a different config file and modules configuration directory.
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
-
-PROGRAM_FILE="$0"
-PROGRAM_NAME="$(basename $0)"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
-MODULE_NAME="main"
-
-# -----------------------------------------------------------------------------
-# create temp dir
-
-debug=0
-TMP_DIR=
-chartsd_cleanup() {
- trap '' EXIT QUIT HUP INT TERM
-
- if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
- [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
- rm -rf "$TMP_DIR"
- fi
- exit 0
-}
-trap chartsd_cleanup EXIT QUIT HUP INT TERM
-
-if [ $UID = "0" ]; then
- TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
-else
- TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
-fi
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# check a few commands
-
-require_cmd() {
- local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
- if [ -z "${x}" -o ! -x "${x}" ]; then
- warning "command '${1}' is not found in ${PATH}."
- eval "${1^^}_CMD=\"\""
- return 1
- fi
-
- eval "${1^^}_CMD=\"${x}\""
- return 0
-}
-
-require_cmd date || exit 1
-require_cmd sed || exit 1
-require_cmd basename || exit 1
-require_cmd dirname || exit 1
-require_cmd cat || exit 1
-require_cmd grep || exit 1
-require_cmd egrep || exit 1
-require_cmd mktemp || exit 1
-require_cmd awk || exit 1
-require_cmd timeout || exit 1
-require_cmd curl || exit 1
-
-# -----------------------------------------------------------------------------
-
-[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
-
-info "started from '$PROGRAM_FILE' with options: $*"
-
-# -----------------------------------------------------------------------------
-# internal defaults
-# netdata exposes a few environment variables for us
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
-
-pluginsd="${NETDATA_PLUGINS_DIR}"
-stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}"
-userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}"
-olduserconfd="${NETDATA_USER_CONFIG_DIR}"
-chartsd="$pluginsd/../charts.d"
-
-minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
-update_every=${minimum_update_frequency} # this will be overwritten by the command line
-
-# work around for non BASH shells
-charts_create="_create"
-charts_update="_update"
-charts_check="_check"
-charts_undescore="_"
-
-# when making iterations, charts.d can loop more frequently
-# to prevent plugins missing iterations.
-# this is a percentage relative to update_every to align its
-# iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time
-# passed since the last time the collected data is equal or
-# above their update_every.
-time_divisor=50
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it
-restart_timeout=$((3600 * 4))
-
-# check if the charts.d plugins are using global variables
-# they should not.
-# It does not currently support BASH v4 arrays, so it is
-# disabled
-dryrunner=0
-
-# check for timeout command
-check_for_timeout=1
-
-# the default enable/disable value for all charts
-enable_all_charts="yes"
-
-# -----------------------------------------------------------------------------
-# parse parameters
-
-check=0
-chart_only=
-while [ ! -z "$1" ]; do
- if [ "$1" = "check" ]; then
- check=1
- shift
- continue
- fi
-
- if [ "$1" = "debug" -o "$1" = "all" ]; then
- debug=1
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1.chart.sh" ]; then
- debug=1
- chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1" ]; then
- debug=1
- chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
- shift
- continue
- fi
-
- # number check
- n="$1"
- x=$((n))
- if [ "$x" = "$n" ]; then
- shift
- update_every=$x
- [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
- continue
- fi
-
- fatal "Cannot understand parameter $1. Aborting."
-done
-
-# -----------------------------------------------------------------------------
-# loop control
-
-# default sleep function
-LOOPSLEEPMS_HIGHRES=0
-now_ms=
-current_time_ms_default() {
- now_ms="$(date +'%s')000"
-}
-current_time_ms="current_time_ms_default"
-current_time_ms_accuracy=1
-mysleep="sleep"
-
-# if found and included, this file overwrites loopsleepms()
-# and current_time_ms() with a high resolution timer function
-# for precise looping.
-source "$pluginsd/loopsleepms.sh.inc"
-[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
-
-# -----------------------------------------------------------------------------
-# load my configuration
-
-for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do
- if [ -f "$myconfig" ]; then
- source "$myconfig"
- if [ $? -ne 0 ]; then
- error "Config file '$myconfig' loaded with errors."
- else
- info "Configuration file '$myconfig' loaded."
- fi
- else
- warning "Configuration file '$myconfig' not found."
- fi
-done
-
-# make sure time_divisor is right
-time_divisor=$((time_divisor))
-[ $time_divisor -lt 10 ] && time_divisor=10
-[ $time_divisor -gt 100 ] && time_divisor=100
-
-# we check for the timeout command, after we load our
-# configuration, so that the user may overwrite the
-# timeout command we use, providing a function that
-# can emulate the timeout command we need:
-# > timeout SECONDS command ...
-if [ $check_for_timeout -eq 1 ]; then
- require_cmd timeout || exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# internal checks
-
-# netdata passes the requested update frequency as the first argument
-update_every=$((update_every + 1 - 1)) # makes sure it is a number
-test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
-
-# check the charts.d directory
-[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
-
-# -----------------------------------------------------------------------------
-# library functions
-
-fixid() {
- echo "$*" |
- tr -c "[A-Z][a-z][0-9]" "_" |
- sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
- tr "[A-Z]" "[a-z]"
-}
-
-run() {
- local ret pid="${BASHPID}" t
-
- if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
- t="${2}"
- shift 2
- case "${NETDATA_SYSTEM_OS_ID}" in
- "alpine")
- timeout -t ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ;;
- *)
- timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ;;
- esac
- ret=$?
- else
- "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- fi
-
- if [ ${ret} -ne 0 ]; then
- {
- printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
- printf "%q " "${@}"
- printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
- cat "${TMP_DIR}/run.${pid}"
- printf " --- END TRACE ---\n"
- } >&2
- fi
- rm -f "${TMP_DIR}/run.${pid}"
-
- return ${ret}
-}
-
-# convert any floating point number
-# to integer, given a multiplier
-# the result is stored in ${FLOAT2INT_RESULT}
-# so that no fork is necessary
-# the multiplier must be a power of 10
-float2int() {
- local f m="$2" a b l v=($1)
- f=${v[0]}
-
- # the length of the multiplier - 1
- l=$((${#m} - 1))
-
- # check if the number is in scientific notation
- if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
- # convert it to decimal
- # unfortunately, this fork cannot be avoided
- # if you know of a way to avoid it, please let me know
- f=$(printf "%0.${l}f" ${f})
- fi
-
- # split the floating point number
- # in integer (a) and decimal (b)
- a=${f/.*/}
- b=${f/*./}
-
- # if the integer part is missing
- # set it to zero
- [ -z "${a}" ] && a="0"
-
- # strip leading zeros from the integer part
- # base 10 conversion
- a=$((10#$a))
-
- # check the length of the decimal part
- # against the length of the multiplier
- if [ ${#b} -gt ${l} ]; then
- # too many digits - take the most significant
- b=${b:0:l}
-
- elif [ ${#b} -lt ${l} ]; then
- # too few digits - pad with zero on the right
- local z="00000000000000000000000" r=$((l - ${#b}))
- b="${b}${z:0:r}"
- fi
-
- # strip leading zeros from the decimal part
- # base 10 conversion
- b=$((10#$b))
-
- # store the result
- FLOAT2INT_RESULT=$(((a * m) + b))
-}
-
-# -----------------------------------------------------------------------------
-# charts check functions
-
-all_charts() {
- cd "$chartsd"
- [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
-
- ls *.chart.sh | sed "s/\.chart\.sh$//g"
-}
-
-declare -A charts_enable_keyword=(
- ['apache']="force"
- ['cpu_apps']="force"
- ['cpufreq']="force"
- ['example']="force"
- ['exim']="force"
- ['hddtemp']="force"
- ['load_average']="force"
- ['mem_apps']="force"
- ['mysql']="force"
- ['nginx']="force"
- ['phpfpm']="force"
- ['postfix']="force"
- ['sensors']="force"
- ['squid']="force"
- ['tomcat']="force"
-)
-
-all_enabled_charts() {
- local charts= enabled= required=
-
- # find all enabled charts
-
- for chart in $(all_charts); do
- MODULE_NAME="${chart}"
-
- eval "enabled=\$$chart"
- if [ -z "${enabled}" ]; then
- enabled="${enable_all_charts}"
- fi
-
- required="${charts_enable_keyword[${chart}]}"
- [ -z "${required}" ] && required="yes"
-
- if [ ! "${enabled}" = "${required}" ]; then
- info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
- else
- debug "is enabled for auto-detection."
- local charts="$charts $chart"
- fi
- done
- MODULE_NAME="main"
-
- local charts2=
- for chart in $charts; do
- MODULE_NAME="${chart}"
-
- # check the enabled charts
- local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
- if [ -z "$check" ]; then
- error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
- continue
- fi
-
- local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
- if [ -z "$create" ]; then
- error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
- continue
- fi
-
- local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
- if [ -z "$update" ]; then
- error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
- continue
- fi
-
- # check its config
- #if [ -f "$userconfd/$chart.conf" ]
- #then
- # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
- # then
- # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
- # continue
- # fi
- #fi
-
- #if [ $dryrunner -eq 1 ]
- # then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
- # if [ $? -ne 0 ]
- # then
- # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
- # continue
- # fi
- #fi
-
- local charts2="$charts2 $chart"
- done
- MODULE_NAME="main"
-
- echo $charts2
- debug "enabled charts: $charts2"
-}
-
-# -----------------------------------------------------------------------------
-# load the charts
-
-suffix_retries="_retries"
-suffix_update_every="_update_every"
-active_charts=
-for chart in $(all_enabled_charts); do
- MODULE_NAME="${chart}"
-
- debug "loading module: '$chartsd/$chart.chart.sh'"
-
- source "$chartsd/$chart.chart.sh"
- [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
-
- # first load the stock config
- if [ -f "$stockconfd/$chart.conf" ]; then
- debug "loading module configuration: '$stockconfd/$chart.conf'"
- source "$stockconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$stockconfd/$chart.conf'"
- fi
-
- # then load the user config (it overwrites the stock)
- if [ -f "$userconfd/$chart.conf" ]; then
- debug "loading module configuration: '$userconfd/$chart.conf'"
- source "$userconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$userconfd/$chart.conf'"
-
- if [ -f "$olduserconfd/$chart.conf" ]; then
- # support for very old netdata that had the charts.d module configs in /etc/netdata
- info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
- source "$olduserconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
- fi
- fi
-
- eval "dt=\$$chart$suffix_update_every"
- dt=$((dt + 1 - 1)) # make sure it is a number
- if [ $dt -lt $update_every ]; then
- eval "$chart$suffix_update_every=$update_every"
- fi
-
- $chart$charts_check
- if [ $? -eq 0 ]; then
- debug "module '$chart' activated"
- active_charts="$active_charts $chart"
- else
- error "module's '$chart' check() function reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "activated modules: $active_charts"
-
-# -----------------------------------------------------------------------------
-# check overwrites
-
-# enable work time reporting
-debug_time=
-test $debug -eq 1 && debug_time=tellwork
-
-# if we only need a specific chart, remove all the others
-if [ ! -z "${chart_only}" ]; then
- debug "requested to run only for: '${chart_only}'"
- check_charts=
- for chart in $active_charts; do
- if [ "$chart" = "$chart_only" ]; then
- check_charts="$chart"
- break
- fi
- done
- active_charts="$check_charts"
-fi
-debug "activated charts: $active_charts"
-
-# stop if we just need a pre-check
-if [ $check -eq 1 ]; then
- info "CHECK RESULT"
- info "Will run the charts: $active_charts"
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-cd "${TMP_DIR}" || exit 1
-
-# -----------------------------------------------------------------------------
-# create charts
-
-run_charts=
-for chart in $active_charts; do
- MODULE_NAME="${chart}"
-
- debug "calling '$chart$charts_create()'..."
- $chart$charts_create
- if [ $? -eq 0 ]; then
- run_charts="$run_charts $chart"
- debug "'$chart' initialized."
- else
- error "module's '$chart' function '$chart$charts_create()' reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "run_charts='$run_charts'"
-
-# -----------------------------------------------------------------------------
-# update dimensions
-
-[ -z "$run_charts" ] && fatal "No charts to collect data from."
-
-declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
-global_update() {
- local exit_at \
- c=0 dt ret last_ms exec_start_ms exec_end_ms \
- chart now_charts=() next_charts=($run_charts) \
- next_ms x seconds millis
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- exit_at=$((now_ms + (restart_timeout * 1000)))
-
- for chart in $run_charts; do
- eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
- test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
-
- eval "charts_retries[$chart]=\$$chart$suffix_retries"
- test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
-
- charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
- charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
- charts_run_counter[$chart]=0
- charts_serial_failures[$chart]=0
-
- echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}"
- echo "DIMENSION run_time 'run time' absolute 1 1"
- done
-
- # the main loop
- while [ "${#next_charts[@]}" -gt 0 ]; do
- c=$((c + 1))
- now_charts=("${next_charts[@]}")
- next_charts=()
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- for chart in "${now_charts[@]}"; do
- MODULE_NAME="${chart}"
-
- if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
- last_ms=${charts_last_update[$chart]}
- dt=$((now_ms - last_ms))
-
- charts_last_update[$chart]=${now_ms}
-
- while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
- charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
- done
-
- # the first call should not give a duration
- # so that netdata calibrates to current time
- dt=$((dt * 1000))
- charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
- if [ ${charts_run_counter[$chart]} -eq 1 ]; then
- dt=
- fi
-
- exec_start_ms=$now_ms
- $chart$charts_update $dt
- ret=$?
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
- exec_end_ms=$now_ms
-
- echo "BEGIN netdata.plugin_chartsd_$chart $dt"
- echo "SET run_time = $((exec_end_ms - exec_start_ms))"
- echo "END"
-
- if [ $ret -eq 0 ]; then
- charts_serial_failures[$chart]=0
- next_charts+=($chart)
- else
- charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
-
- if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
- error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
- else
- error "module's '$chart' update() function reports failure. Will keep trying for a while."
- next_charts+=($chart)
- fi
- fi
- else
- next_charts+=($chart)
- fi
- done
- MODULE_NAME="${chart}"
-
- # wait the time you are required to
- next_ms=$((now_ms + (update_every * 1000 * 100)))
- for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
- next_ms=$((next_ms - now_ms))
-
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
- next_ms=$((next_ms + current_time_ms_accuracy))
- seconds=$((next_ms / 1000))
- millis=$((next_ms % 1000))
- if [ ${millis} -lt 10 ]; then
- millis="00${millis}"
- elif [ ${millis} -lt 100 ]; then
- millis="0${millis}"
- fi
-
- debug "sleeping for ${seconds}.${millis} seconds."
- ${mysleep} ${seconds}.${millis}
- else
- debug "sleeping for ${update_every} seconds."
- ${mysleep} $update_every
- fi
-
- test ${now_ms} -ge ${exit_at} && exit 0
- done
-
- fatal "nothing left to do, exiting..."
-}
-
-global_update
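Among the helpers in this removed file (which appears to be the generated copy of `charts.d.plugin.in`, where the same code lives on), `float2int()` converts a floating point value to a scaled integer and leaves the result in `FLOAT2INT_RESULT`, so callers avoid a fork. Usage looks like this:

```sh
float2int "3.14159" 1000    # the multiplier must be a power of 10
echo "${FLOAT2INT_RESULT}"  # prints 3141
```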
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
index 0df6c30c3..62363f3db 100755
--- a/collectors/charts.d.plugin/charts.d.plugin.in
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
@@ -275,20 +275,26 @@ fixid() {
tr "[A-Z]" "[a-z]"
}
+isvarset() {
+ [ -n "$1" ] && [ "$1" != "unknown" ] && [ "$1" != "none" ]
+ return $?
+}
+
+getosid() {
+ if isvarset "${NETDATA_CONTAINER_OS_ID}"; then
+ echo "${NETDATA_CONTAINER_OS_ID}"
+ else
+ echo "${NETDATA_SYSTEM_OS_ID}"
+ fi
+}
+
run() {
local ret pid="${BASHPID}" t
if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
t="${2}"
shift 2
- case "${NETDATA_SYSTEM_OS_ID}" in
- "alpine")
- timeout -t ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ;;
- *)
- timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}"
- ;;
- esac
+ timeout "${t}" "${@}" 2>"${TMP_DIR}/run.${pid}"
ret=$?
else
"${@}" 2>"${TMP_DIR}/run.${pid}"
@@ -390,14 +396,34 @@ declare -A charts_enable_keyword=(
['tomcat']="force"
)
+declare -A obsolete_charts=(
+ ['apache']="python.d.plugin module"
+ ['cpu_apps']="apps.plugin"
+ ['cpufreq']="proc plugin"
+ ['exim']="python.d.plugin module"
+ ['hddtemp']="python.d.plugin module"
+ ['load_average']="proc plugin"
+ ['mem_apps']="proc plugin"
+ ['mysql']="python.d.plugin module"
+ ['nginx']="python.d.plugin module"
+ ['phpfpm']="python.d.plugin module"
+ ['postfix']="python.d.plugin module"
+ ['squid']="python.d.plugin module"
+ ['tomcat']="python.d.plugin module"
+)
+
all_enabled_charts() {
- local charts= enabled= required=
+ local charts enabled required
# find all enabled charts
-
for chart in $(all_charts); do
MODULE_NAME="${chart}"
+ if [ -n "${obsolete_charts["$MODULE_NAME"]}" ]; then
+ debug "is replaced by ${obsolete_charts["$MODULE_NAME"]}, skipping it."
+ continue
+ fi
+
eval "enabled=\$$chart"
if [ -z "${enabled}" ]; then
enabled="${enable_all_charts}"
@@ -578,6 +604,12 @@ debug "run_charts='$run_charts'"
[ -z "$run_charts" ] && fatal "No charts to collect data from."
+keepalive() {
+ if [ ! -t 1 ] && ! printf "\n"; then
+ chartsd_cleanup
+ fi
+}
+
declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
global_update() {
local exit_at \
@@ -608,6 +640,8 @@ global_update() {
# the main loop
while [ "${#next_charts[@]}" -gt 0 ]; do
+ keepalive
+
c=$((c + 1))
now_charts=("${next_charts[@]}")
next_charts=()
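The new `keepalive()` is a liveness probe on stdout: once the netdata daemon closes the plugin's pipe, any write fails and the plugin cleans up instead of looping forever. The same pattern can be tried in isolation; this sketch only echoes instead of calling the real cleanup:

```sh
# the probe fires when stdout is not a terminal and cannot be written
probe() { [ ! -t 1 ] && ! printf "\n" 2> /dev/null && echo >&2 "stdout gone, cleaning up"; }
probe >&-   # run with stdout explicitly closed: the message appears on stderr
```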
diff --git a/collectors/charts.d.plugin/cpu_apps/Makefile.inc b/collectors/charts.d.plugin/cpu_apps/Makefile.inc
deleted file mode 100644
index a35f82837..000000000
--- a/collectors/charts.d.plugin/cpu_apps/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += cpu_apps/cpu_apps.chart.sh
-dist_chartsconfig_DATA += cpu_apps/cpu_apps.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += cpu_apps/README.md cpu_apps/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
deleted file mode 100644
index c8230aa4f..000000000
--- a/collectors/charts.d.plugin/cpu_apps/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# cpu_apps
-
-> THIS MODULE IS OBSOLETE.
-> USE [APPS.PLUGIN](../../apps.plugin).
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
deleted file mode 100644
index e91c46d54..000000000
--- a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-# THIS PLUGIN IS OBSOLETE
-# USE apps.plugin INSTEAD
-
-# a space-separated list of commands to monitor
-cpu_apps_apps=
-
-# these are required for computing memory in bytes and cpu in seconds
-#cpu_apps_pagesize="`getconf PAGESIZE`"
-cpu_apps_clockticks="$(getconf CLK_TCK)"
-
-cpu_apps_update_every=60
-
-cpu_apps_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- if [ -z "$cpu_apps_apps" ]; then
- error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf"
- return 1
- fi
- return 0
-}
-
-cpu_apps_bc_finalze=
-
-cpu_apps_create() {
-
- echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every"
-
- local x=
- for x in $cpu_apps_apps; do
- echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks"
-
- # this string is needed later in the update() function
- # to finalize the instructions for the bc command
- cpu_apps_bc_finalze="$cpu_apps_bc_finalze \"SET $x = \"; $x;"
- done
- return 0
-}
-
-cpu_apps_update() {
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- echo "BEGIN chartsd_apps.cpu"
- ps -o pid,comm -C "$cpu_apps_apps" |
- grep -v "COMMAND" |
- (
- while read pid name; do
- echo "$name+=$(cat /proc/$pid/stat | cut -d ' ' -f 14-15)"
- done
- ) |
- (
- sed -e "s/ \+/ /g" -e "s/ /+/g"
- echo "$cpu_apps_bc_finalze"
- ) | bc
- echo "END"
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
deleted file mode 100644
index 850cd0c6f..000000000
--- a/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# apps.plugin can do better
-
-#cpu_apps_apps=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#cpu_apps_update_every=2
-
-# the number of retries to do in case of failure
-# before disabling the module
-#cpu_apps_retries=10
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
deleted file mode 100644
index fc2bfca19..000000000
--- a/collectors/charts.d.plugin/cpufreq/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# cpufreq
-
-> THIS MODULE IS OBSOLETE.
-> USE THE [PROC PLUGIN](../../proc.plugin) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
deleted file mode 100644
index 68708d911..000000000
--- a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-
-cpufreq_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
-cpufreq_sys_depth=10
-cpufreq_source_update=1
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-cpufreq_update_every=
-cpufreq_priority=10000
-
-cpufreq_find_all_files() {
- find "$1" -maxdepth $cpufreq_sys_depth -name scaling_cur_freq 2>/dev/null
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-cpufreq_check() {
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- [ -z "$(cpufreq_find_all_files "$cpufreq_sys_dir")" ] && return 1
- return 0
-}
-
-# _create is called once, to create the charts
-cpufreq_create() {
- local dir file id i
-
- # we create a script with the source of the
- # cpufreq_update() function
- # - the highest speed we can achieve -
- [ $cpufreq_source_update -eq 1 ] && echo >"$TMP_DIR/cpufreq.sh" "cpufreq_update() {"
-
- echo "CHART cpu.cpufreq '' 'CPU Clock' 'MHz' 'cpufreq' '' line $((cpufreq_priority + 1)) $cpufreq_update_every"
- echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\""
-
- i=0
- for file in $(cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u); do
- i=$((i + 1))
- dir=$(dirname "$file")
- cpu=
-
- [ -f "$dir/affected_cpus" ] && cpu=$(cat "$dir/affected_cpus")
- [ -z "$cpu" ] && cpu="$i.a"
-
- id="$(fixid "cpu$cpu")"
-
- debug "file='$file', dir='$dir', cpu='$cpu', id='$id'"
-
- echo "DIMENSION $id '$id' absolute 1 1000"
- echo >>"$TMP_DIR/cpufreq.sh" "echo \"SET $id = \"\$(< $file )"
- done
- echo >>"$TMP_DIR/cpufreq.sh" "echo END"
-
- [ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}"
-
- # ok, load the function cpufreq_update() we created
- # shellcheck disable=SC1090
- [ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh"
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-cpufreq_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
- # shellcheck disable=SC1090
- [ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1"
-
- return 0
-}
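To make the code-generation trick above concrete: `cpufreq_create()` writes the body of `cpufreq_update()` into `$TMP_DIR/cpufreq.sh` once and then sources it, so every later update is just `echo` plus `$(< file)` reads, with no `find`/`dirname` forks per iteration. With two hypothetical policy directories, the generated file would look roughly like:

```sh
# Hypothetical generated $TMP_DIR/cpufreq.sh (paths are examples).
# scaling_cur_freq is in kHz; the DIMENSION divisor 1000 renders MHz.
cpufreq_update() {
  echo "BEGIN cpu.cpufreq $1"
  echo "SET cpu0 = "$(< /sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq )
  echo "SET cpu1 = "$(< /sys/devices/system/cpu/cpufreq/policy1/scaling_cur_freq )
  echo END
}
```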
diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
deleted file mode 100644
index 7130555af..000000000
--- a/collectors/charts.d.plugin/cpufreq/cpufreq.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#cpufreq_sys_dir="/sys/devices"
-#cpufreq_sys_depth=10
-#cpufreq_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#cpufreq_update_every=
-
-# the charts priority on the dashboard
-#cpufreq_priority=10000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#cpufreq_retries=10
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
index 98562d624..de21f6ad6 100644
--- a/collectors/charts.d.plugin/example/README.md
+++ b/collectors/charts.d.plugin/example/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "Example"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/example/README.md
+-->
+
# Example
This is just an example charts.d data collector.
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
index 8bae570a3..5ff51a579 100644
--- a/collectors/charts.d.plugin/example/example.chart.sh
+++ b/collectors/charts.d.plugin/example/example.chart.sh
@@ -32,63 +32,63 @@ example_last=0
example_count=0
example_get() {
- # do all the work to collect / calculate the values
- # for each dimension
- #
- # Remember:
- # 1. KEEP IT SIMPLE AND SHORT
- # 2. AVOID FORKS (avoid piping commands)
- # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
- # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
-
- example_value1=$RANDOM
- example_value2=$RANDOM
- example_value3=$RANDOM
- example_value4=$((8192 + (RANDOM * 16383 / 32767)))
-
- if [ $example_count -gt 0 ]; then
- example_count=$((example_count - 1))
-
- [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
- [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
- else
- example_count=$((1 + (RANDOM * 5 / 32767)))
-
- if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
- example_value4=$((example_value4 - 16383))
- fi
- if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
- example_value4=$((example_value4 + 16383))
- fi
- fi
- example_last=$example_value4
-
- # this should return:
- # - 0 to send the data to netdata
- # - 1 to report a failure to collect the data
-
- return 0
+ # do all the work to collect / calculate the values
+ # for each dimension
+ #
+ # Remember:
+ # 1. KEEP IT SIMPLE AND SHORT
+ # 2. AVOID FORKS (avoid piping commands)
+ # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
+ # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
+
+ example_value1=$RANDOM
+ example_value2=$RANDOM
+ example_value3=$RANDOM
+ example_value4=$((8192 + (RANDOM * 16383 / 32767)))
+
+ if [ $example_count -gt 0 ]; then
+ example_count=$((example_count - 1))
+
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
+ [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
+ else
+ example_count=$((1 + (RANDOM * 5 / 32767)))
+
+ if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
+ example_value4=$((example_value4 - 16383))
+ fi
+ if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
+ example_value4=$((example_value4 + 16383))
+ fi
+ fi
+ example_last=$example_value4
+
+ # this should return:
+ # - 0 to send the data to netdata
+ # - 1 to report a failure to collect the data
+
+ return 0
}
# _check is called once, to find out if this chart should be enabled or not
example_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
- # check something
- [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
+ # check something
+ [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
- # check that we can collect data
- example_get || return 1
+ # check that we can collect data
+ example_get || return 1
- return 0
+ return 0
}
# _create is called once, to create the charts
example_create() {
- # create the chart with 3 dimensions
- cat <<EOF
+ # create the chart with 3 dimensions
+ cat << EOF
CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every
DIMENSION random1 '' percentage-of-absolute-row 1 1
DIMENSION random2 '' percentage-of-absolute-row 1 1
@@ -97,18 +97,18 @@ CHART example.random2 '' "A random number" "random number" random random area $(
DIMENSION random '' absolute 1 1
EOF
- return 0
+ return 0
}
# _update is called continuously, to collect the values
example_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
- example_get || return 1
+ example_get || return 1
- # write the result of the work.
- cat <<VALUESEOF
+ # write the result of the work.
+ cat << VALUESEOF
BEGIN example.random $1
SET random1 = $example_value1
SET random2 = $example_value2
@@ -119,5 +119,5 @@ SET random = $example_value4
END
VALUESEOF
- return 0
+ return 0
}
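A note on the protocol the example module above speaks: a charts.d module only prints text on stdout, and charts.d.plugin forwards it to Netdata. One update cycle of the (reindented) heredocs above emits something like the following, where the dimension values are random by design and `1000000` stands in for the microseconds-since-last-run argument:

```
BEGIN example.random 1000000
SET random1 = 4821
SET random2 = 17010
SET random3 = 29515
SET random4 = 12400
END
BEGIN example.random2 1000000
SET random = 12400
END
```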
diff --git a/collectors/charts.d.plugin/exim/Makefile.inc b/collectors/charts.d.plugin/exim/Makefile.inc
deleted file mode 100644
index ca2112a80..000000000
--- a/collectors/charts.d.plugin/exim/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += exim/exim.chart.sh
-dist_chartsconfig_DATA += exim/exim.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += exim/README.md exim/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
deleted file mode 100644
index 5c73c002e..000000000
--- a/collectors/charts.d.plugin/exim/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# exim
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/exim) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh
deleted file mode 100644
index 7b0ef70d2..000000000
--- a/collectors/charts.d.plugin/exim/exim.chart.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-# Contributed by @jsveiga with PR #480
-
-# the exim command to run
-exim_command=
-
-# how frequently to collect queue size
-exim_update_every=5
-
-exim_priority=60000
-
-exim_check() {
- if [ -z "${exim_command}" ]; then
- require_cmd exim || return 1
- exim_command="${EXIM_CMD}"
- fi
-
- if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]; then
- error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file"
- return 1
- fi
-
- return 0
-}
-
-exim_create() {
- cat <<EOF
-CHART exim_local.qemails '' "Exim Queue Emails" "emails" queue exim.queued.emails line $((exim_priority + 1)) $exim_update_every
-DIMENSION emails '' absolute 1 1
-EOF
- return 0
-}
-
-exim_update() {
- echo "BEGIN exim_local.qemails $1"
- echo "SET emails = $(run "${exim_command}" -bpc)"
- echo "END"
- return 0
-}
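For context on the deleted exim module above: its single dimension is the output of `exim -bpc`, which prints the number of messages currently in the queue. The permission check mirrors what can be verified by hand; a hedged manual probe:

```sh
# If this prints a plain number, the module could collect.
# If it prints a "permission denied" message instead, Exim needs
# 'queue_list_requires_admin = false', as the check above advises.
exim -bpc
```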
diff --git a/collectors/charts.d.plugin/exim/exim.conf b/collectors/charts.d.plugin/exim/exim.conf
deleted file mode 100644
index f96ac4dbb..000000000
--- a/collectors/charts.d.plugin/exim/exim.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the exim command to run
-# if empty, it will use the one found in the system path
-#exim_command=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#exim_update_every=5
-
-# the charts priority on the dashboard
-#exim_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#exim_retries=10
diff --git a/collectors/charts.d.plugin/hddtemp/Makefile.inc b/collectors/charts.d.plugin/hddtemp/Makefile.inc
deleted file mode 100644
index 2bd29e5b1..000000000
--- a/collectors/charts.d.plugin/hddtemp/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += hddtemp/hddtemp.chart.sh
-dist_chartsconfig_DATA += hddtemp/hddtemp.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
deleted file mode 100644
index 77f48956a..000000000
--- a/collectors/charts.d.plugin/hddtemp/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# hddtemp
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/hddtemp) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-The plugin will collect temperatures from disks.
-
-It will create one chart with all active disks:
-
-1. **temperature in Celsius**
-
-## configuration
-
-hddtemp needs to be running in daemonized mode
-
-```sh
-# host with daemonized hddtemp
-hddtemp_host="localhost"
-
-# port on which hddtemp is showing data
-hddtemp_port="7634"
-
-# array of included disks
-# the default is to include all
-hddtemp_disks=()
-```
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
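A quick way to check the prerequisite described above before enabling the module: probe the daemon by hand (host and port are the defaults from the config block):

```sh
# Expect one |-separated record per disk, e.g.:
#   |/dev/sda|SAMPLE DISK MODEL|35|C|
nc localhost 7634
```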
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
deleted file mode 100644
index a4cef3c3b..000000000
--- a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-# contributed by @paulfantom with PR #511
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-hddtemp_host="localhost"
-hddtemp_port="7634"
-declare -A hddtemp_disks=()
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-hddtemp_update_every=3
-hddtemp_priority=90000
-
-# _check is called once, to find out if this chart should be enabled or not
-hddtemp_check() {
- require_cmd nc || return 1
- run nc $hddtemp_host $hddtemp_port && return 0 || return 1
-}
-
-# _create is called once, to create the charts
-hddtemp_create() {
- if [ ${#hddtemp_disks[@]} -eq 0 ]; then
- local all
- all=$(nc $hddtemp_host $hddtemp_port)
- unset hddtemp_disks
- # shellcheck disable=SC2190,SC2207
- hddtemp_disks=($(grep -Po '/dev/[^|]+' <<<"$all" | cut -c 6-))
- fi
- # local disk_names
- # disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`)
-
- echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every"
- for i in $(seq 0 $((${#hddtemp_disks[@]} - 1))); do
- # echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1"
- echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1"
- done
- return 0
-}
-
-# _update is called continuously, to collect the values
-#hddtemp_last=0
-#hddtemp_count=0
-hddtemp_update() {
- # local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` )
- # local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` )
- OLD_IFS=$IFS
- set -f
- # shellcheck disable=SC2207
- IFS="|" all=($(nc $hddtemp_host $hddtemp_port 2>/dev/null))
- set +f
- IFS=$OLD_IFS
-
- # check if there is some data
- if [ -z "${all[3]}" ]; then
- return 1
- fi
-
- # write the result of the work.
- echo "BEGIN hddtemp.temperature $1"
- end=${#hddtemp_disks[@]}
- for ((i = 0; i < end; i++)); do
- # temperature - this will turn SLP to zero
- t=$((all[$((i * 5 + 3))]))
- echo "SET ${hddtemp_disks[$i]} = $t"
- done
- echo "END"
-
- return 0
-}
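The deleted hddtemp module parses that `|`-separated daemon output by splitting on `|`, so each disk contributes five fields and the temperature of disk `i` sits at index `i*5 + 3`, exactly the offset `hddtemp_update()` above uses. A sketch with made-up sample data:

```sh
# Splitting on '|' yields, per disk i: [i*5]='' [i*5+1]=device
# [i*5+2]=model [i*5+3]=temperature [i*5+4]=unit
sample='|/dev/sda|SAMPLE DISK A|35|C||/dev/sdb|SAMPLE DISK B|41|C|'
IFS='|' read -r -a all <<< "$sample"
for ((i = 0; i < 2; i++)); do
  echo "SET disk$i = $((all[i * 5 + 3]))"   # -> 35, then 41
done
```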
diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
deleted file mode 100644
index b6037b40e..000000000
--- a/collectors/charts.d.plugin/hddtemp/hddtemp.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#hddtemp_host="localhost"
-#hddtemp_port="7634"
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#hddtemp_update_every=3
-
-# the charts priority on the dashboard
-#hddtemp_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#hddtemp_retries=10
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
index d75c1ea96..b1c1f05e4 100644
--- a/collectors/charts.d.plugin/libreswan/README.md
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -1,6 +1,12 @@
-# libreswan
+<!--
+title: "Libreswan IPSec tunnel monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/libreswan/README.md
+sidebar_label: "Libreswan IPSec tunnels"
+-->
-The plugin will collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
+# Libreswan IPSec tunnel monitoring with Netdata
+
+Collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
The following charts are created, **per tunnel**:
@@ -13,9 +19,15 @@ The following charts are created, **per tunnel**:
- bytes in
- bytes out
-## configuration
+## Configuration
+
+Edit the `charts.d/libreswan.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Its config file is `/etc/netdata/charts.d/libreswan.conf`.
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/libreswan.conf
+```
The plugin executes 2 commands to collect all the information it needs:
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
index 1a8f90b11..bfa2b9ea1 100644
--- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -37,78 +37,93 @@ declare -A libreswan_established_add_time=()
# we need this to avoid converting tunnel names to chart IDs on every iteration
declare -A libreswan_tunnel_charts=()
+is_able_sudo_ipsec() {
+ if ! sudo -n -l "${IPSEC_CMD}" whack --status > /dev/null 2>&1; then
+ return 1
+ fi
+ if ! sudo -n -l "${IPSEC_CMD}" whack --trafficstatus > /dev/null 2>&1; then
+ return 1
+ fi
+ return 0
+}
+
# run the ipsec command
libreswan_ipsec() {
- if [ ${libreswan_sudo} -ne 0 ]; then
- sudo -n "${IPSEC_CMD}" "${@}"
- return $?
- else
- "${IPSEC_CMD}" "${@}"
- return $?
- fi
+ if [ ${libreswan_sudo} -ne 0 ]; then
+ sudo -n "${IPSEC_CMD}" "${@}"
+ return $?
+ else
+ "${IPSEC_CMD}" "${@}"
+ return $?
+ fi
}
# fetch latest values - fill the arrays
libreswan_get() {
- # do all the work to collect / calculate the values
- # for each dimension
-
- # empty the variables
- libreswan_traffic_in=()
- libreswan_traffic_out=()
- libreswan_established_add_time=()
- libreswan_connected_tunnels=()
-
- # convert the ipsec command output to a shell script
- # and source it to get the values
- # shellcheck disable=SC1090
- source <(
- {
- libreswan_ipsec whack --status
- libreswan_ipsec whack --trafficstatus
- } | sed -n \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
- ) || return 1
-
- # check we got some data
- [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
-
- return 0
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # empty the variables
+ libreswan_traffic_in=()
+ libreswan_traffic_out=()
+ libreswan_established_add_time=()
+ libreswan_connected_tunnels=()
+
+ # convert the ipsec command output to a shell script
+ # and source it to get the values
+ # shellcheck disable=SC1090
+ source <(
+ {
+ libreswan_ipsec whack --status
+ libreswan_ipsec whack --trafficstatus
+ } | sed -n \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
+ -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",\{0,1\}.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
+ ) || return 1
+
+ # check we got some data
+ [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
+
+ return 0
}
# _check is called once, to find out if this chart should be enabled or not
libreswan_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ require_cmd ipsec || return 1
- require_cmd ipsec || return 1
+ # make sure it is libreswan
+ # shellcheck disable=SC2143
+ if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
+ error "ipsec command is not Libreswan. Disabling Libreswan plugin."
+ return 1
+ fi
- # make sure it is libreswan
- # shellcheck disable=SC2143
- if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
- error "ipsec command is not Libreswan. Disabling Libreswan plugin."
- return 1
- fi
+ if [ ${libreswan_sudo} -ne 0 ] && ! is_able_sudo_ipsec; then
+ error "not enough permissions to execute ipsec with sudo. Disabling Libreswan plugin."
+ return 1
+ fi
- # check that we can collect data
- libreswan_get || return 1
+ # check that we can collect data
+ libreswan_get || return 1
- return 0
+ return 0
}
# create the charts for an ipsec tunnel
libreswan_create_one() {
- local n="${1}" name
+ local n="${1}" name
- name="${libreswan_connected_tunnels[${n}]}"
+ name="${libreswan_connected_tunnels[${n}]}"
- [ ! -z "${libreswan_tunnel_charts[${name}]}" ] && return 0
+ [ -n "${libreswan_tunnel_charts[${name}]}" ] && return 0
- libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
+ libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
- cat <<EOF
+ cat << EOF
CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every
DIMENSION in '' incremental 8 1000
DIMENSION out '' incremental -8 1000
@@ -116,35 +131,35 @@ CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "Lib
DIMENSION uptime '' absolute 1 1
EOF
- return 0
+ return 0
}
# _create is called once, to create the charts
libreswan_create() {
- local n
- for n in "${!libreswan_connected_tunnels[@]}"; do
- libreswan_create_one "${n}"
- done
- return 0
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_create_one "${n}"
+ done
+ return 0
}
libreswan_now=$(date +%s)
# send the values to netdata for an ipsec tunnel
libreswan_update_one() {
- local n="${1}" microseconds="${2}" name id uptime
+ local n="${1}" microseconds="${2}" name id uptime
- name="${libreswan_connected_tunnels[${n}]}"
- id="${libreswan_tunnel_charts[${name}]}"
+ name="${libreswan_connected_tunnels[${n}]}"
+ id="${libreswan_tunnel_charts[${name}]}"
- [ -z "${id}" ] && libreswan_create_one "${name}"
+ [ -z "${id}" ] && libreswan_create_one "${name}"
- uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
- [ ${uptime} -lt 0 ] && uptime=0
+ uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
+ [ ${uptime} -lt 0 ] && uptime=0
- # write the result of the work.
- cat <<VALUESEOF
+ # write the result of the work.
+ cat << VALUESEOF
BEGIN libreswan.${id}_net ${microseconds}
SET in = ${libreswan_traffic_in[${n}]}
SET out = ${libreswan_traffic_out[${n}]}
@@ -157,16 +172,16 @@ VALUESEOF
# _update is called continuously, to collect the values
libreswan_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
- libreswan_get || return 1
- libreswan_now=$(date +%s)
+ libreswan_get || return 1
+ libreswan_now=$(date +%s)
- local n
- for n in "${!libreswan_connected_tunnels[@]}"; do
- libreswan_update_one "${n}" "${@}"
- done
+ local n
+ for n in "${!libreswan_connected_tunnels[@]}"; do
+ libreswan_update_one "${n}" "${@}"
+ done
- return 0
+ return 0
}
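The most unusual trick in the libreswan module above is `libreswan_get()`: it rewrites `ipsec whack --trafficstatus` lines into shell array assignments with `sed` and then `source`s them, populating three associative arrays in a single fork instead of a parse loop. A runnable sketch, assuming a hypothetical tunnel named `office` with SA number 6 (array names shortened here):

```sh
# One --trafficstatus line (hypothetical tunnel and values):
line='006 #6: "office", type=ESP, add_time=1612345678, inBytes=1024, outBytes=2048, id=@office'

# The second sed expression turns it into executable assignments:
declare -A tin tout tadd
source <(echo "$line" | sed -n \
  -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",\{0,1\}.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|tin[\"\1\"]=\"\4\"; tout[\"\1\"]=\"\5\"; tadd[\"\1\"]=\"\3\";|p")

echo "in=${tin[6]} out=${tout[6]} since=${tadd[6]}"
# -> in=1024 out=2048 since=1612345678
```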
diff --git a/collectors/charts.d.plugin/load_average/Makefile.inc b/collectors/charts.d.plugin/load_average/Makefile.inc
deleted file mode 100644
index e5a481bf4..000000000
--- a/collectors/charts.d.plugin/load_average/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += load_average/load_average.chart.sh
-dist_chartsconfig_DATA += load_average/load_average.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += load_average/README.md load_average/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
deleted file mode 100644
index 40b860cc5..000000000
--- a/collectors/charts.d.plugin/load_average/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# load_average
-
-> THIS MODULE IS OBSOLETE.
-> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh
deleted file mode 100644
index 841e3d9f6..000000000
--- a/collectors/charts.d.plugin/load_average/load_average.chart.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-load_average_update_every=5
-load_priority=100
-
-# this is an example charts.d collector
-# it is disabled by default.
-# there is no point in enabling it, since netdata already
-# collects this information using its internal plugins.
-load_average_enabled=0
-
-load_average_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- if [ ${load_average_update_every} -lt 5 ]; then
- # anything shorter than 5 seconds is meaningless -
- # the kernel changes this value every 5 seconds
- load_average_update_every=5
- fi
-
- [ ${load_average_enabled} -eq 0 ] && return 1
- return 0
-}
-
-load_average_create() {
- # create a chart with 3 dimensions
- cat <<EOF
-CHART system.load '' "System Load Average" "load" load system.load line $((load_priority + 1)) $load_average_update_every
-DIMENSION load1 '1 min' absolute 1 100
-DIMENSION load5 '5 mins' absolute 1 100
-DIMENSION load15 '15 mins' absolute 1 100
-EOF
-
- return 0
-}
-
-load_average_update() {
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- # here we parse the system load average
- # the values are decimal (2 decimal digits), so we remove the dot;
- # the DIMENSION lines declare divisor = 100, so the graph shows the right values
- loadavg="$(cat /proc/loadavg | sed -e "s/\.//g")"
- load1=$(echo $loadavg | cut -d ' ' -f 1)
- load5=$(echo $loadavg | cut -d ' ' -f 2)
- load15=$(echo $loadavg | cut -d ' ' -f 3)
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN system.load
-SET load1 = $load1
-SET load5 = $load5
-SET load15 = $load15
-END
-VALUESEOF
-
- return 0
-}
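The deleted load_average module handles `/proc/loadavg`'s decimals with plain integer math, as the comments above describe: strip the dots, and let the `absolute 1 100` DIMENSION divisor restore the scale on display. A worked sketch:

```sh
# /proc/loadavg starts like "1.23 0.98 0.75 ..."; stripping the dots
# gives integers (123 098 075), and dividing by 100 on display
# recovers 1.23 / 0.98 / 0.75.
loadavg="1.23 0.98 0.75"            # sample value, not read from /proc here
read -r load1 load5 load15 <<< "${loadavg//./}"
printf 'SET load1 = %s\n' "$load1"  # -> SET load1 = 123
```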
diff --git a/collectors/charts.d.plugin/load_average/load_average.conf b/collectors/charts.d.plugin/load_average/load_average.conf
deleted file mode 100644
index 68979275f..000000000
--- a/collectors/charts.d.plugin/load_average/load_average.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# netdata can collect this metric already
-
-#load_average_enabled=0
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#load_average_update_every=5
-
-# the charts priority on the dashboard
-#load_average_priority=100
-
-# the number of retries to do in case of failure
-# before disabling the module
-#load_average_retries=10
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
index e44eff689..c386083fb 100644
--- a/collectors/charts.d.plugin/loopsleepms.sh.inc
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
@@ -10,16 +10,24 @@ fi
# -----------------------------------------------------------------------------
# use the date command as a high resolution timer
+# macOS 'date' doesn't support '%N' (nanosecond) precision;
+# there, echo $(/bin/date +"%N") prints the literal string "N"
+if [ "$($LOOPSLEEP_DATE +"%N")" = "N" ]; then
+ LOOPSLEEP_DATE_FORMAT="%s * 1000"
+else
+ LOOPSLEEP_DATE_FORMAT="%s * 1000 + 10#%-N / 1000000"
+fi
+
now_ms=
LOOPSLEEPMS_HIGHRES=1
test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
current_time_ms_from_date() {
- if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
- now_ms="$($LOOPSLEEP_DATE +'%s')000"
- else
- now_ms="$(($($LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000')))"
- fi
+ if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
+ now_ms="$($LOOPSLEEP_DATE +'%s')000"
+ else
+ now_ms="$(($($LOOPSLEEP_DATE +"$LOOPSLEEP_DATE_FORMAT")))"
+ fi
}
# -----------------------------------------------------------------------------
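To illustrate the portability fix above as a standalone sketch: GNU `date` expands `%N` to nanoseconds, while macOS/BSD `date` leaves the literal `N`, so the format string is probed once and the millisecond timestamp is pure shell arithmetic afterwards:

```sh
# GNU date path: seconds*1000 + nanoseconds/1000000
# (10# forces base 10, guarding against leading zeros in %-N)
if [ "$(date +"%N")" = "N" ]; then
  now_ms=$(( $(date +'%s * 1000') ))          # macOS/BSD: 1s resolution
else
  now_ms=$(( $(date +'%s * 1000 + 10#%-N / 1000000') ))
fi
echo "$now_ms"
```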
diff --git a/collectors/charts.d.plugin/mem_apps/Makefile.inc b/collectors/charts.d.plugin/mem_apps/Makefile.inc
deleted file mode 100644
index ea546fb69..000000000
--- a/collectors/charts.d.plugin/mem_apps/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += mem_apps/mem_apps.chart.sh
-dist_chartsconfig_DATA += mem_apps/mem_apps.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += mem_apps/README.md mem_apps/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
deleted file mode 100644
index 93d33832a..000000000
--- a/collectors/charts.d.plugin/mem_apps/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# mem_apps
-
-> THIS MODULE IS OBSOLETE.
-> USE [APPS.PLUGIN](../../apps.plugin).
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
deleted file mode 100644
index b9b84a467..000000000
--- a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-# shellcheck shell=bash disable=SC2154,SC1072,SC1073,SC2009,SC2162,SC2006,SC2002,SC2086,SC1117
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-mem_apps_apps=
-
-# these are required for computing memory in bytes and cpu in seconds
-#mem_apps_pagesize="`getconf PAGESIZE`"
-#mem_apps_clockticks="`getconf CLK_TCK`"
-
-mem_apps_update_every=
-
-mem_apps_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- if [ -z "$mem_apps_apps" ]; then
- error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf"
- return 1
- fi
- return 0
-}
-
-mem_apps_bc_finalize=
-
-mem_apps_create() {
-
- echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every"
-
- local x=
- for x in $mem_apps_apps; do
- echo "DIMENSION $x $x absolute 1 1024"
-
- # this string is needed later in the update() function
- # to finalize the instructions for the bc command
- mem_apps_bc_finalize="$mem_apps_bc_finalize \"SET $x = \"; $x;"
- done
- return 0
-}
-
-mem_apps_update() {
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- echo "BEGIN chartsd_apps.mem"
- ps -o comm,rss -C "$mem_apps_apps" |
- grep -v "^COMMAND" |
- (
- sed -e "s/ \+/ /g" -e "s/ /+=/g"
- echo "$mem_apps_bc_finalze"
- ) | bc
- echo "END"
-
- return 0
-}
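mem_apps uses the same `bc` trick as cpu_apps above, but the input is simpler: `ps -o comm,rss` already prints one `command rss` pair per process, so `s/ /+=/g` alone turns each line into an accumulation. A runnable sketch with invented values:

```sh
{
  printf '%s\n' 'nginx 51200' 'nginx 20480' | sed -e 's/ \+/ /g' -e 's/ /+=/g'
  echo '"SET nginx = "; nginx;'
} | bc
# -> SET nginx = 71680   (RSS in KiB; the DIMENSION divisor 1024 shows MiB)
```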
diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
deleted file mode 100644
index 75d24dc3e..000000000
--- a/collectors/charts.d.plugin/mem_apps/mem_apps.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# app.plugin can do better
-
-#mem_apps_apps=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#mem_apps_update_every=2
-
-# the number of retries to do in case of failure
-# before disabling the module
-#mem_apps_retries=10
diff --git a/collectors/charts.d.plugin/mysql/Makefile.inc b/collectors/charts.d.plugin/mysql/Makefile.inc
deleted file mode 100644
index ca02fd078..000000000
--- a/collectors/charts.d.plugin/mysql/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += mysql/mysql.chart.sh
-dist_chartsconfig_DATA += mysql/mysql.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
deleted file mode 100644
index 2e8d72a43..000000000
--- a/collectors/charts.d.plugin/mysql/README.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# mysql
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/mysql) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-The plugin will monitor one or more mysql servers.
-
-It will produce the following charts:
-
-1. **Bandwidth** in kbps
-
-- in
-- out
-
-2. **Queries** in queries/sec
-
-- queries
-- questions
-- slow queries
-
-3. **Operations** in operations/sec
-
-- opened tables
-- flush
-- commit
-- delete
-- prepare
-- read first
-- read key
-- read next
-- read prev
-- read random
-- read random next
-- rollback
-- save point
-- update
-- write
-
-4. **Table Locks** in locks/sec
-
-- immediate
-- waited
-
-5. **Select Issues** in issues/sec
-
-- full join
-- full range join
-- range
-- range check
-- scan
-
-6. **Sort Issues** in issues/sec
-
-- merge passes
-- range
-- scan
-
-## configuration
-
-You can configure multiple database servers.
-
-You can provide, per server, the following:
-
-1. a name, anything you like, but keep it short
-2. the mysql command to connect to the server
-3. the mysql command line options to be used for connecting to the server
-
-Here is an example for 2 servers:
-
-```sh
-mysql_opts[server1]="-h server1.example.com"
-mysql_opts[server2]="-h server2.example.com --connect_timeout 2"
-```
-
-The above will use the `mysql` command found in the system path.
-You can also provide a custom mysql command per server, like this:
-
-```sh
-mysql_cmds[server2]="/opt/mysql/bin/mysql"
-```
-
-The above sets the mysql command only for server2. server1 will use the system default.
-
-If no configuration is given, the plugin will attempt to connect to mysql server at localhost.
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh
deleted file mode 100644
index e1207dc9a..000000000
--- a/collectors/charts.d.plugin/mysql/mysql.chart.sh
+++ /dev/null
@@ -1,511 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html
-#
-# https://dev.mysql.com/doc/refman/5.1/en/show-status.html
-# SHOW STATUS provides server status information (see Section 5.1.6, “Server Status Variables”).
-# This statement does not require any privilege.
-# It requires only the ability to connect to the server.
-
-mysql_update_every=2
-mysql_priority=60000
-
-declare -A mysql_cmds=() mysql_opts=() mysql_ids=() mysql_data=()
-
-mysql_get() {
- local arr
- local oIFS="${IFS}"
- mysql_data=()
- IFS=$'\t'$'\n'
- #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" ))
- #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" ))
- # shellcheck disable=SC2207
- arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+"))
- IFS="${oIFS}"
-
- [ "${#arr[@]}" -lt 3 ] && return 1
- local end=${#arr[@]}
- for ((i = 2; i < end; i += 2)); do
- mysql_data["${arr[$i]}"]=${arr[i + 1]}
- done
-
- [ -z "${mysql_data[Connections]}" ] && return 1
-
- mysql_data[Thread_cache_misses]=0
- [ $((mysql_data[Connections] + 1 - 1)) -gt 0 ] && mysql_data[Thread_cache_misses]=$((mysql_data[Threads_created] * 10000 / mysql_data[Connections]))
-
- return 0
-}
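A note on the derived metric above: `Thread_cache_misses` avoids floating point by scaling the ratio by 10000 while its `DIMENSION` line (further down) declares divisor 100, i.e. a percentage with two decimal places. A worked example with hypothetical counters:

```sh
# Hypothetical counters: Threads_created=25, Connections=4000
# misses = 25 * 10000 / 4000 = 62, charted with divisor 100 -> 0.62%
echo $(( 25 * 10000 / 4000 ))   # prints 62
```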
-
-mysql_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- local x m mysql_cmd tryroot=0 unconfigured=0
-
- if [ "${1}" = "tryroot" ]; then
- tryroot=1
- shift
- fi
-
- # shellcheck disable=SC2230
- [ -z "${mysql_cmd}" ] && mysql_cmd="$(which mysql 2>/dev/null || command -v mysql 2>/dev/null)"
-
- if [ ${#mysql_opts[@]} -eq 0 ]; then
- unconfigured=1
-
- mysql_cmds[local]="$mysql_cmd"
-
- if [ $tryroot -eq 1 ]; then
- # the user has not configured us for mysql access
- # if the root user is passwordless in mysql, we can
- # attempt to connect to mysql as root
- mysql_opts[local]="-u root"
- else
- mysql_opts[local]=
- fi
- fi
-
- # check once if the url works
- for m in "${!mysql_opts[@]}"; do
- [ -z "${mysql_cmds[$m]}" ] && mysql_cmds[$m]="$mysql_cmd"
- if [ -z "${mysql_cmds[$m]}" ]; then
- # shellcheck disable=SC2154
- error "cannot get mysql command for '${m}'. Please set mysql_cmds[$m]='/path/to/mysql', in $confd/mysql.conf"
- fi
-
- mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
- # shellcheck disable=SC2181
- if [ ! $? -eq 0 ]; then
- error "cannot get global status for '$m'. Please set mysql_opts[$m]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
- unset "mysql_cmds[$m]"
- unset "mysql_opts[$m]"
- unset "mysql_ids[$m]"
- continue
- fi
-
- mysql_ids[$m]="$(fixid "$m")"
- done
-
- if [ ${#mysql_opts[@]} -eq 0 ]; then
- if [ ${unconfigured} -eq 1 ] && [ ${tryroot} -eq 0 ]; then
- mysql_check tryroot "${@}"
- return $?
- else
- error "no mysql servers found. Please set mysql_opts[name]='options' to whatever needed to get connected to the mysql server, in $confd/mysql.conf"
- return 1
- fi
- fi
-
- return 0
-}
-
-mysql_create() {
- local x
-
- # create the charts
- for x in "${mysql_ids[@]}"; do
- cat <<EOF
-CHART mysql_$x.net '' "mysql Bandwidth" "kilobits/s" bandwidth mysql.net area $((mysql_priority + 1)) $mysql_update_every
-DIMENSION Bytes_received in incremental 8 1024
-DIMENSION Bytes_sent out incremental -8 1024
-
-CHART mysql_$x.queries '' "mysql Queries" "queries/s" queries mysql.queries line $((mysql_priority + 2)) $mysql_update_every
-DIMENSION Queries queries incremental 1 1
-DIMENSION Questions questions incremental 1 1
-DIMENSION Slow_queries slow_queries incremental -1 1
-
-CHART mysql_$x.handlers '' "mysql Handlers" "handlers/s" handlers mysql.handlers line $((mysql_priority + 3)) $mysql_update_every
-DIMENSION Handler_commit commit incremental 1 1
-DIMENSION Handler_delete delete incremental 1 1
-DIMENSION Handler_prepare prepare incremental 1 1
-DIMENSION Handler_read_first read_first incremental 1 1
-DIMENSION Handler_read_key read_key incremental 1 1
-DIMENSION Handler_read_next read_next incremental 1 1
-DIMENSION Handler_read_prev read_prev incremental 1 1
-DIMENSION Handler_read_rnd read_rnd incremental 1 1
-DIMENSION Handler_read_rnd_next read_rnd_next incremental 1 1
-DIMENSION Handler_rollback rollback incremental 1 1
-DIMENSION Handler_savepoint savepoint incremental 1 1
-DIMENSION Handler_savepoint_rollback savepoint_rollback incremental 1 1
-DIMENSION Handler_update update incremental 1 1
-DIMENSION Handler_write write incremental 1 1
-
-CHART mysql_$x.table_locks '' "mysql Tables Locks" "locks/s" locks mysql.table_locks line $((mysql_priority + 4)) $mysql_update_every
-DIMENSION Table_locks_immediate immediate incremental 1 1
-DIMENSION Table_locks_waited waited incremental -1 1
-
-CHART mysql_$x.join_issues '' "mysql Select Join Issues" "joins/s" issues mysql.join_issues line $((mysql_priority + 5)) $mysql_update_every
-DIMENSION Select_full_join full_join incremental 1 1
-DIMENSION Select_full_range_join full_range_join incremental 1 1
-DIMENSION Select_range range incremental 1 1
-DIMENSION Select_range_check range_check incremental 1 1
-DIMENSION Select_scan scan incremental 1 1
-
-CHART mysql_$x.sort_issues '' "mysql Sort Issues" "issues/s" issues mysql.sort.issues line $((mysql_priority + 6)) $mysql_update_every
-DIMENSION Sort_merge_passes merge_passes incremental 1 1
-DIMENSION Sort_range range incremental 1 1
-DIMENSION Sort_scan scan incremental 1 1
-
-CHART mysql_$x.tmp '' "mysql Tmp Operations" "counter" temporaries mysql.tmp line $((mysql_priority + 7)) $mysql_update_every
-DIMENSION Created_tmp_disk_tables disk_tables incremental 1 1
-DIMENSION Created_tmp_files files incremental 1 1
-DIMENSION Created_tmp_tables tables incremental 1 1
-
-CHART mysql_$x.connections '' "mysql Connections" "connections/s" connections mysql.connections line $((mysql_priority + 8)) $mysql_update_every
-DIMENSION Connections all incremental 1 1
-DIMENSION Aborted_connects aborted incremental 1 1
-
-CHART mysql_$x.binlog_cache '' "mysql Binlog Cache" "transactions/s" binlog mysql.binlog_cache line $((mysql_priority + 9)) $mysql_update_every
-DIMENSION Binlog_cache_disk_use disk incremental 1 1
-DIMENSION Binlog_cache_use all incremental 1 1
-
-CHART mysql_$x.threads '' "mysql Threads" "threads" threads mysql.threads line $((mysql_priority + 10)) $mysql_update_every
-DIMENSION Threads_connected connected absolute 1 1
-DIMENSION Threads_created created incremental 1 1
-DIMENSION Threads_cached cached absolute -1 1
-DIMENSION Threads_running running absolute 1 1
-
-CHART mysql_$x.thread_cache_misses '' "mysql Threads Cache Misses" "misses" threads mysql.thread_cache_misses area $((mysql_priority + 11)) $mysql_update_every
-DIMENSION misses misses absolute 1 100
-
-CHART mysql_$x.innodb_io '' "mysql InnoDB I/O Bandwidth" "kilobytes/s" innodb mysql.innodb_io area $((mysql_priority + 12)) $mysql_update_every
-DIMENSION Innodb_data_read read incremental 1 1024
-DIMENSION Innodb_data_written write incremental -1 1024
-
-CHART mysql_$x.innodb_io_ops '' "mysql InnoDB I/O Operations" "operations/s" innodb mysql.innodb_io_ops line $((mysql_priority + 13)) $mysql_update_every
-DIMENSION Innodb_data_reads reads incremental 1 1
-DIMENSION Innodb_data_writes writes incremental -1 1
-DIMENSION Innodb_data_fsyncs fsyncs incremental 1 1
-
-CHART mysql_$x.innodb_io_pending_ops '' "mysql InnoDB Pending I/O Operations" "operations" innodb mysql.innodb_io_pending_ops line $((mysql_priority + 14)) $mysql_update_every
-DIMENSION Innodb_data_pending_reads reads absolute 1 1
-DIMENSION Innodb_data_pending_writes writes absolute -1 1
-DIMENSION Innodb_data_pending_fsyncs fsyncs absolute 1 1
-
-CHART mysql_$x.innodb_log '' "mysql InnoDB Log Operations" "operations/s" innodb mysql.innodb_log line $((mysql_priority + 15)) $mysql_update_every
-DIMENSION Innodb_log_waits waits incremental 1 1
-DIMENSION Innodb_log_write_requests write_requests incremental -1 1
-DIMENSION Innodb_log_writes writes incremental -1 1
-
-CHART mysql_$x.innodb_os_log '' "mysql InnoDB OS Log Operations" "operations" innodb mysql.innodb_os_log line $((mysql_priority + 16)) $mysql_update_every
-DIMENSION Innodb_os_log_fsyncs fsyncs incremental 1 1
-DIMENSION Innodb_os_log_pending_fsyncs pending_fsyncs absolute 1 1
-DIMENSION Innodb_os_log_pending_writes pending_writes absolute -1 1
-
-CHART mysql_$x.innodb_os_log_io '' "mysql InnoDB OS Log Bandwidth" "kilobytes/s" innodb mysql.innodb_os_log_io area $((mysql_priority + 17)) $mysql_update_every
-DIMENSION Innodb_os_log_written write incremental -1 1024
-
-CHART mysql_$x.innodb_cur_row_lock '' "mysql InnoDB Current Row Locks" "operations" innodb mysql.innodb_cur_row_lock area $((mysql_priority + 18)) $mysql_update_every
-DIMENSION Innodb_row_lock_current_waits current_waits absolute 1 1
-
-CHART mysql_$x.innodb_rows '' "mysql InnoDB Row Operations" "operations/s" innodb mysql.innodb_rows area $((mysql_priority + 19)) $mysql_update_every
-DIMENSION Innodb_rows_read read incremental 1 1
-DIMENSION Innodb_rows_deleted deleted incremental -1 1
-DIMENSION Innodb_rows_inserted inserted incremental 1 1
-DIMENSION Innodb_rows_updated updated incremental -1 1
-
-CHART mysql_$x.innodb_buffer_pool_pages '' "mysql InnoDB Buffer Pool Pages" "pages" innodb mysql.innodb_buffer_pool_pages line $((mysql_priority + 20)) $mysql_update_every
-DIMENSION Innodb_buffer_pool_pages_data data absolute 1 1
-DIMENSION Innodb_buffer_pool_pages_dirty dirty absolute -1 1
-DIMENSION Innodb_buffer_pool_pages_free free absolute 1 1
-DIMENSION Innodb_buffer_pool_pages_flushed flushed incremental -1 1
-DIMENSION Innodb_buffer_pool_pages_misc misc absolute -1 1
-DIMENSION Innodb_buffer_pool_pages_total total absolute 1 1
-
-CHART mysql_$x.innodb_buffer_pool_bytes '' "mysql InnoDB Buffer Pool Bytes" "MiB" innodb mysql.innodb_buffer_pool_bytes area $((mysql_priority + 21)) $mysql_update_every
-DIMENSION Innodb_buffer_pool_bytes_data data absolute 1 $((1024 * 1024))
-DIMENSION Innodb_buffer_pool_bytes_dirty dirty absolute -1 $((1024 * 1024))
-
-CHART mysql_$x.innodb_buffer_pool_read_ahead '' "mysql InnoDB Buffer Pool Read Ahead" "operations/s" innodb mysql.innodb_buffer_pool_read_ahead area $((mysql_priority + 22)) $mysql_update_every
-DIMENSION Innodb_buffer_pool_read_ahead all incremental 1 1
-DIMENSION Innodb_buffer_pool_read_ahead_evicted evicted incremental -1 1
-DIMENSION Innodb_buffer_pool_read_ahead_rnd random incremental 1 1
-
-CHART mysql_$x.innodb_buffer_pool_reqs '' "mysql InnoDB Buffer Pool Requests" "requests/s" innodb mysql.innodb_buffer_pool_reqs area $((mysql_priority + 23)) $mysql_update_every
-DIMENSION Innodb_buffer_pool_read_requests reads incremental 1 1
-DIMENSION Innodb_buffer_pool_write_requests writes incremental -1 1
-
-CHART mysql_$x.innodb_buffer_pool_ops '' "mysql InnoDB Buffer Pool Operations" "operations/s" innodb mysql.innodb_buffer_pool_ops area $((mysql_priority + 24)) $mysql_update_every
-DIMENSION Innodb_buffer_pool_reads 'disk reads' incremental 1 1
-DIMENSION Innodb_buffer_pool_wait_free 'wait free' incremental -1 1
-
-CHART mysql_$x.qcache_ops '' "mysql QCache Operations" "queries/s" qcache mysql.qcache_ops line $((mysql_priority + 25)) $mysql_update_every
-DIMENSION Qcache_hits hits incremental 1 1
-DIMENSION Qcache_lowmem_prunes 'lowmem prunes' incremental -1 1
-DIMENSION Qcache_inserts inserts incremental 1 1
-DIMENSION Qcache_not_cached 'not cached' incremental -1 1
-
-CHART mysql_$x.qcache '' "mysql QCache Queries in Cache" "queries" qcache mysql.qcache line $((mysql_priority + 26)) $mysql_update_every
-DIMENSION Qcache_queries_in_cache queries absolute 1 1
-
-CHART mysql_$x.qcache_freemem '' "mysql QCache Free Memory" "MiB" qcache mysql.qcache_freemem area $((mysql_priority + 27)) $mysql_update_every
-DIMENSION Qcache_free_memory free absolute 1 $((1024 * 1024))
-
-CHART mysql_$x.qcache_memblocks '' "mysql QCache Memory Blocks" "blocks" qcache mysql.qcache_memblocks line $((mysql_priority + 28)) $mysql_update_every
-DIMENSION Qcache_free_blocks free absolute 1 1
-DIMENSION Qcache_total_blocks total absolute 1 1
-
-CHART mysql_$x.key_blocks '' "mysql MyISAM Key Cache Blocks" "blocks" myisam mysql.key_blocks line $((mysql_priority + 29)) $mysql_update_every
-DIMENSION Key_blocks_unused unused absolute 1 1
-DIMENSION Key_blocks_used used absolute -1 1
-DIMENSION Key_blocks_not_flushed 'not flushed' absolute 1 1
-
-CHART mysql_$x.key_requests '' "mysql MyISAM Key Cache Requests" "requests/s" myisam mysql.key_requests area $((mysql_priority + 30)) $mysql_update_every
-DIMENSION Key_read_requests reads incremental 1 1
-DIMENSION Key_write_requests writes incremental -1 1
-
-CHART mysql_$x.key_disk_ops '' "mysql MyISAM Key Cache Disk Operations" "operations/s" myisam mysql.key_disk_ops area $((mysql_priority + 31)) $mysql_update_every
-DIMENSION Key_reads reads incremental 1 1
-DIMENSION Key_writes writes incremental -1 1
-
-CHART mysql_$x.files '' "mysql Open Files" "files" files mysql.files line $((mysql_priority + 32)) $mysql_update_every
-DIMENSION Open_files files absolute 1 1
-
-CHART mysql_$x.files_rate '' "mysql Opened Files Rate" "files/s" files mysql.files_rate line $((mysql_priority + 33)) $mysql_update_every
-DIMENSION Opened_files files incremental 1 1
-EOF
-
- if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
- cat <<EOF
-CHART mysql_$x.binlog_stmt_cache '' "mysql Binlog Statement Cache" "statements/s" binlog mysql.binlog_stmt_cache line $((mysql_priority + 50)) $mysql_update_every
-DIMENSION Binlog_stmt_cache_disk_use disk incremental 1 1
-DIMENSION Binlog_stmt_cache_use all incremental 1 1
-EOF
- fi
-
- if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
- cat <<EOF
-CHART mysql_$x.connection_errors '' "mysql Connection Errors" "connections/s" connections mysql.connection_errors line $((mysql_priority + 51)) $mysql_update_every
-DIMENSION Connection_errors_accept accept incremental 1 1
-DIMENSION Connection_errors_internal internal incremental 1 1
-DIMENSION Connection_errors_max_connections max incremental 1 1
-DIMENSION Connection_errors_peer_addr peer_addr incremental 1 1
-DIMENSION Connection_errors_select select incremental 1 1
-DIMENSION Connection_errors_tcpwrap tcpwrap incremental 1 1
-EOF
- fi
-
- done
- return 0
-}
-
-mysql_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- local m x
- for m in "${!mysql_ids[@]}"; do
- x="${mysql_ids[$m]}"
- mysql_get "${mysql_cmds[$m]}" ${mysql_opts[$m]}
-
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- unset "mysql_ids[$m]"
- unset "mysql_opts[$m]"
- unset "mysql_cmds[$m]"
- error "failed to get values for '${m}', disabling it."
- continue
- fi
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN mysql_$x.net $1
-SET Bytes_received = ${mysql_data[Bytes_received]}
-SET Bytes_sent = ${mysql_data[Bytes_sent]}
-END
-BEGIN mysql_$x.queries $1
-SET Queries = ${mysql_data[Queries]}
-SET Questions = ${mysql_data[Questions]}
-SET Slow_queries = ${mysql_data[Slow_queries]}
-END
-BEGIN mysql_$x.handlers $1
-SET Handler_commit = ${mysql_data[Handler_commit]}
-SET Handler_delete = ${mysql_data[Handler_delete]}
-SET Handler_prepare = ${mysql_data[Handler_prepare]}
-SET Handler_read_first = ${mysql_data[Handler_read_first]}
-SET Handler_read_key = ${mysql_data[Handler_read_key]}
-SET Handler_read_next = ${mysql_data[Handler_read_next]}
-SET Handler_read_prev = ${mysql_data[Handler_read_prev]}
-SET Handler_read_rnd = ${mysql_data[Handler_read_rnd]}
-SET Handler_read_rnd_next = ${mysql_data[Handler_read_rnd_next]}
-SET Handler_rollback = ${mysql_data[Handler_rollback]}
-SET Handler_savepoint = ${mysql_data[Handler_savepoint]}
-SET Handler_savepoint_rollback = ${mysql_data[Handler_savepoint_rollback]}
-SET Handler_update = ${mysql_data[Handler_update]}
-SET Handler_write = ${mysql_data[Handler_write]}
-END
-BEGIN mysql_$x.table_locks $1
-SET Table_locks_immediate = ${mysql_data[Table_locks_immediate]}
-SET Table_locks_waited = ${mysql_data[Table_locks_waited]}
-END
-BEGIN mysql_$x.join_issues $1
-SET Select_full_join = ${mysql_data[Select_full_join]}
-SET Select_full_range_join = ${mysql_data[Select_full_range_join]}
-SET Select_range = ${mysql_data[Select_range]}
-SET Select_range_check = ${mysql_data[Select_range_check]}
-SET Select_scan = ${mysql_data[Select_scan]}
-END
-BEGIN mysql_$x.sort_issues $1
-SET Sort_merge_passes = ${mysql_data[Sort_merge_passes]}
-SET Sort_range = ${mysql_data[Sort_range]}
-SET Sort_scan = ${mysql_data[Sort_scan]}
-END
-BEGIN mysql_$x.tmp $1
-SET Created_tmp_disk_tables = ${mysql_data[Created_tmp_disk_tables]}
-SET Created_tmp_files = ${mysql_data[Created_tmp_files]}
-SET Created_tmp_tables = ${mysql_data[Created_tmp_tables]}
-END
-BEGIN mysql_$x.connections $1
-SET Connections = ${mysql_data[Connections]}
-SET Aborted_connects = ${mysql_data[Aborted_connects]}
-END
-BEGIN mysql_$x.binlog_cache $1
-SET Binlog_cache_disk_use = ${mysql_data[Binlog_cache_disk_use]}
-SET Binlog_cache_use = ${mysql_data[Binlog_cache_use]}
-END
-BEGIN mysql_$x.threads $1
-SET Threads_connected = ${mysql_data[Threads_connected]}
-SET Threads_created = ${mysql_data[Threads_created]}
-SET Threads_cached = ${mysql_data[Threads_cached]}
-SET Threads_running = ${mysql_data[Threads_running]}
-END
-BEGIN mysql_$x.thread_cache_misses $1
-SET misses = ${mysql_data[Thread_cache_misses]}
-END
-BEGIN mysql_$x.innodb_io $1
-SET Innodb_data_read = ${mysql_data[Innodb_data_read]}
-SET Innodb_data_written = ${mysql_data[Innodb_data_written]}
-END
-BEGIN mysql_$x.innodb_io_ops $1
-SET Innodb_data_reads = ${mysql_data[Innodb_data_reads]}
-SET Innodb_data_writes = ${mysql_data[Innodb_data_writes]}
-SET Innodb_data_fsyncs = ${mysql_data[Innodb_data_fsyncs]}
-END
-BEGIN mysql_$x.innodb_io_pending_ops $1
-SET Innodb_data_pending_reads = ${mysql_data[Innodb_data_pending_reads]}
-SET Innodb_data_pending_writes = ${mysql_data[Innodb_data_pending_writes]}
-SET Innodb_data_pending_fsyncs = ${mysql_data[Innodb_data_pending_fsyncs]}
-END
-BEGIN mysql_$x.innodb_log $1
-SET Innodb_log_waits = ${mysql_data[Innodb_log_waits]}
-SET Innodb_log_write_requests = ${mysql_data[Innodb_log_write_requests]}
-SET Innodb_log_writes = ${mysql_data[Innodb_log_writes]}
-END
-BEGIN mysql_$x.innodb_os_log $1
-SET Innodb_os_log_fsyncs = ${mysql_data[Innodb_os_log_fsyncs]}
-SET Innodb_os_log_pending_fsyncs = ${mysql_data[Innodb_os_log_pending_fsyncs]}
-SET Innodb_os_log_pending_writes = ${mysql_data[Innodb_os_log_pending_writes]}
-END
-BEGIN mysql_$x.innodb_os_log_io $1
-SET Innodb_os_log_written = ${mysql_data[Innodb_os_log_written]}
-END
-BEGIN mysql_$x.innodb_cur_row_lock $1
-SET Innodb_row_lock_current_waits = ${mysql_data[Innodb_row_lock_current_waits]}
-END
-BEGIN mysql_$x.innodb_rows $1
-SET Innodb_rows_inserted = ${mysql_data[Innodb_rows_inserted]}
-SET Innodb_rows_read = ${mysql_data[Innodb_rows_read]}
-SET Innodb_rows_updated = ${mysql_data[Innodb_rows_updated]}
-SET Innodb_rows_deleted = ${mysql_data[Innodb_rows_deleted]}
-END
-BEGIN mysql_$x.innodb_buffer_pool_pages $1
-SET Innodb_buffer_pool_pages_data = ${mysql_data[Innodb_buffer_pool_pages_data]}
-SET Innodb_buffer_pool_pages_dirty = ${mysql_data[Innodb_buffer_pool_pages_dirty]}
-SET Innodb_buffer_pool_pages_free = ${mysql_data[Innodb_buffer_pool_pages_free]}
-SET Innodb_buffer_pool_pages_flushed = ${mysql_data[Innodb_buffer_pool_pages_flushed]}
-SET Innodb_buffer_pool_pages_misc = ${mysql_data[Innodb_buffer_pool_pages_misc]}
-SET Innodb_buffer_pool_pages_total = ${mysql_data[Innodb_buffer_pool_pages_total]}
-END
-BEGIN mysql_$x.innodb_buffer_pool_bytes $1
-SET Innodb_buffer_pool_bytes_data = ${mysql_data[Innodb_buffer_pool_bytes_data]}
-SET Innodb_buffer_pool_bytes_dirty = ${mysql_data[Innodb_buffer_pool_bytes_dirty]}
-END
-BEGIN mysql_$x.innodb_buffer_pool_read_ahead $1
-SET Innodb_buffer_pool_read_ahead = ${mysql_data[Innodb_buffer_pool_read_ahead]}
-SET Innodb_buffer_pool_read_ahead_evicted = ${mysql_data[Innodb_buffer_pool_read_ahead_evicted]}
-SET Innodb_buffer_pool_read_ahead_rnd = ${mysql_data[Innodb_buffer_pool_read_ahead_rnd]}
-END
-BEGIN mysql_$x.innodb_buffer_pool_reqs $1
-SET Innodb_buffer_pool_read_requests = ${mysql_data[Innodb_buffer_pool_read_requests]}
-SET Innodb_buffer_pool_write_requests = ${mysql_data[Innodb_buffer_pool_write_requests]}
-END
-BEGIN mysql_$x.innodb_buffer_pool_ops $1
-SET Innodb_buffer_pool_reads = ${mysql_data[Innodb_buffer_pool_reads]}
-SET Innodb_buffer_pool_wait_free = ${mysql_data[Innodb_buffer_pool_wait_free]}
-END
-BEGIN mysql_$x.qcache_ops $1
-SET Qcache_hits = ${mysql_data[Qcache_hits]}
-SET Qcache_lowmem_prunes = ${mysql_data[Qcache_lowmem_prunes]}
-SET Qcache_inserts = ${mysql_data[Qcache_inserts]}
-SET Qcache_not_cached = ${mysql_data[Qcache_not_cached]}
-END
-BEGIN mysql_$x.qcache $1
-SET Qcache_queries_in_cache = ${mysql_data[Qcache_queries_in_cache]}
-END
-BEGIN mysql_$x.qcache_freemem $1
-SET Qcache_free_memory = ${mysql_data[Qcache_free_memory]}
-END
-BEGIN mysql_$x.qcache_memblocks $1
-SET Qcache_free_blocks = ${mysql_data[Qcache_free_blocks]}
-SET Qcache_total_blocks = ${mysql_data[Qcache_total_blocks]}
-END
-BEGIN mysql_$x.key_blocks $1
-SET Key_blocks_unused = ${mysql_data[Key_blocks_unused]}
-SET Key_blocks_used = ${mysql_data[Key_blocks_used]}
-SET Key_blocks_not_flushed = ${mysql_data[Key_blocks_not_flushed]}
-END
-BEGIN mysql_$x.key_requests $1
-SET Key_read_requests = ${mysql_data[Key_read_requests]}
-SET Key_write_requests = ${mysql_data[Key_write_requests]}
-END
-BEGIN mysql_$x.key_disk_ops $1
-SET Key_reads = ${mysql_data[Key_reads]}
-SET Key_writes = ${mysql_data[Key_writes]}
-END
-BEGIN mysql_$x.files $1
-SET Open_files = ${mysql_data[Open_files]}
-END
-BEGIN mysql_$x.files_rate $1
-SET Opened_files = ${mysql_data[Opened_files]}
-END
-VALUESEOF
-
- if [ ! -z "${mysql_data[Binlog_stmt_cache_disk_use]}" ]; then
- cat <<VALUESEOF
-BEGIN mysql_$x.binlog_stmt_cache $1
-SET Binlog_stmt_cache_disk_use = ${mysql_data[Binlog_stmt_cache_disk_use]}
-SET Binlog_stmt_cache_use = ${mysql_data[Binlog_stmt_cache_use]}
-END
-VALUESEOF
- fi
-
- if [ ! -z "${mysql_data[Connection_errors_accept]}" ]; then
- cat <<VALUESEOF
-BEGIN mysql_$x.connection_errors $1
-SET Connection_errors_accept = ${mysql_data[Connection_errors_accept]}
-SET Connection_errors_internal = ${mysql_data[Connection_errors_internal]}
-SET Connection_errors_max_connections = ${mysql_data[Connection_errors_max_connections]}
-SET Connection_errors_peer_addr = ${mysql_data[Connection_errors_peer_addr]}
-SET Connection_errors_select = ${mysql_data[Connection_errors_select]}
-SET Connection_errors_tcpwrap = ${mysql_data[Connection_errors_tcpwrap]}
-END
-VALUESEOF
- fi
- done
-
- [ ${#mysql_ids[@]} -eq 0 ] && error "no mysql servers left active." && return 1
- return 0
-}
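
For context, the deleted module above speaks netdata's charts.d text protocol: one `BEGIN chart microseconds` line, one `SET dimension = value` line per dimension, and a closing `END`. A minimal sketch of an update function speaking that protocol follows; the chart and dimension names are hypothetical, not part of the diff.

```sh
# minimal, hypothetical charts.d-style update function; it emits one
# BEGIN/SET/END block per chart, exactly like mysql_update() above
example_update() {
  local microseconds="$1" cpus

  # collect one cheap value to report
  cpus="$(grep -c '^processor' /proc/cpuinfo)"

  cat <<VALUESEOF
BEGIN example_local.cpus $microseconds
SET count = $cpus
END
VALUESEOF

  return 0
}
```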
diff --git a/collectors/charts.d.plugin/mysql/mysql.conf b/collectors/charts.d.plugin/mysql/mysql.conf
deleted file mode 100644
index 683e4af35..000000000
--- a/collectors/charts.d.plugin/mysql/mysql.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#mysql_cmds[name]=""
-#mysql_opts[name]=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#mysql_update_every=2
-
-# the charts priority on the dashboard
-#mysql_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#mysql_retries=10
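
Before its removal, a user would have pointed the module at one or more servers through the commented `mysql_cmds`/`mysql_opts` arrays above. A hypothetical filled-in example (server names, host, and credentials invented for illustration):

```sh
# hypothetical mysql.conf entries for two servers named "local" and "replica"
mysql_cmds[local]="mysql"
mysql_opts[local]="-u netdata"

mysql_cmds[replica]="mysql"
mysql_opts[replica]="-h 10.0.0.2 -u netdata"
```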
diff --git a/collectors/charts.d.plugin/nginx/Makefile.inc b/collectors/charts.d.plugin/nginx/Makefile.inc
deleted file mode 100644
index c9d31aada..000000000
--- a/collectors/charts.d.plugin/nginx/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += nginx/nginx.chart.sh
-dist_chartsconfig_DATA += nginx/nginx.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
deleted file mode 100644
index 57b4a4b12..000000000
--- a/collectors/charts.d.plugin/nginx/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# nginx
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/nginx) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh
deleted file mode 100644
index 812de2cbb..000000000
--- a/collectors/charts.d.plugin/nginx/nginx.chart.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-
-nginx_url="http://127.0.0.1:80/stub_status"
-nginx_curl_opts=""
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-nginx_update_every=
-nginx_priority=60000
-
-declare -a nginx_response=()
-nginx_active_connections=0
-nginx_accepts=0
-nginx_handled=0
-nginx_requests=0
-nginx_reading=0
-nginx_writing=0
-nginx_waiting=0
-nginx_get() {
- # shellcheck disable=SC2207
- nginx_response=($(run curl -Ss ${nginx_curl_opts} "${nginx_url}"))
- # shellcheck disable=SC2181
- if [ $? -ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi
-
- if [ "${nginx_response[0]}" != "Active" ] ||
- [ "${nginx_response[1]}" != "connections:" ] ||
- [ "${nginx_response[3]}" != "server" ] ||
- [ "${nginx_response[4]}" != "accepts" ] ||
- [ "${nginx_response[5]}" != "handled" ] ||
- [ "${nginx_response[6]}" != "requests" ] ||
- [ "${nginx_response[10]}" != "Reading:" ] ||
- [ "${nginx_response[12]}" != "Writing:" ] ||
- [ "${nginx_response[14]}" != "Waiting:" ]; then
- error "Invalid response from nginx server: ${nginx_response[*]}"
- return 1
- fi
-
- nginx_active_connections="${nginx_response[2]}"
- nginx_accepts="${nginx_response[7]}"
- nginx_handled="${nginx_response[8]}"
- nginx_requests="${nginx_response[9]}"
- nginx_reading="${nginx_response[11]}"
- nginx_writing="${nginx_response[13]}"
- nginx_waiting="${nginx_response[15]}"
-
- if [ -z "${nginx_active_connections}" ] ||
- [ -z "${nginx_accepts}" ] ||
- [ -z "${nginx_handled}" ] ||
- [ -z "${nginx_requests}" ] ||
- [ -z "${nginx_reading}" ] ||
- [ -z "${nginx_writing}" ] ||
- [ -z "${nginx_waiting}" ]; then
- error "empty values got from nginx server: ${nginx_response[*]}"
- return 1
- fi
-
- return 0
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-nginx_check() {
-
- nginx_get
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- # shellcheck disable=SC2154
- error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf"
- return 1
- fi
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- return 0
-}
-
-# _create is called once, to create the charts
-nginx_create() {
- cat <<EOF
-CHART nginx_local.connections '' "nginx Active Connections" "connections" nginx nginx.connections line $((nginx_priority + 1)) $nginx_update_every
-DIMENSION active '' absolute 1 1
-
-CHART nginx_local.requests '' "nginx Requests" "requests/s" nginx nginx.requests line $((nginx_priority + 2)) $nginx_update_every
-DIMENSION requests '' incremental 1 1
-
-CHART nginx_local.connections_status '' "nginx Active Connections by Status" "connections" nginx nginx.connections.status line $((nginx_priority + 3)) $nginx_update_every
-DIMENSION reading '' absolute 1 1
-DIMENSION writing '' absolute 1 1
-DIMENSION waiting idle absolute 1 1
-
-CHART nginx_local.connect_rate '' "nginx Connections Rate" "connections/s" nginx nginx.connections.rate line $((nginx_priority + 4)) $nginx_update_every
-DIMENSION accepts accepted incremental 1 1
-DIMENSION handled '' incremental 1 1
-EOF
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-nginx_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- nginx_get || return 1
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN nginx_local.connections $1
-SET active = $((nginx_active_connections))
-END
-BEGIN nginx_local.requests $1
-SET requests = $((nginx_requests))
-END
-BEGIN nginx_local.connections_status $1
-SET reading = $((nginx_reading))
-SET writing = $((nginx_writing))
-SET waiting = $((nginx_waiting))
-END
-BEGIN nginx_local.connect_rate $1
-SET accepts = $((nginx_accepts))
-SET handled = $((nginx_handled))
-END
-VALUESEOF
-
- return 0
-}
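
The word-index checks in `nginx_get()` above assume the fixed layout of nginx's `stub_status` response. A sketch of that layout and the resulting indices (the URL is the module's default; the counts are invented):

```sh
# stub_status returns a fixed, whitespace-delimited layout:
#
#   Active connections: 291
#   server accepts handled requests
#    16630948 16630948 31070465
#   Reading: 6 Writing: 179 Waiting: 106
#
# split into words, "Active" is index 0, the connection count index 2,
# accepts/handled/requests indexes 7/8/9 - exactly what nginx_get() reads
response=($(curl -Ss "http://127.0.0.1:80/stub_status"))
echo "active=${response[2]} accepts=${response[7]} handled=${response[8]} requests=${response[9]}"
```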
diff --git a/collectors/charts.d.plugin/nginx/nginx.conf b/collectors/charts.d.plugin/nginx/nginx.conf
deleted file mode 100644
index c46100a58..000000000
--- a/collectors/charts.d.plugin/nginx/nginx.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#nginx_url="http://127.0.0.1:80/stub_status"
-#nginx_curl_opts=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#nginx_update_every=
-
-# the charts priority on the dashboard
-#nginx_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#nginx_retries=10
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
index ea93318f8..3f9c5f0a2 100644
--- a/collectors/charts.d.plugin/nut/README.md
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -1,6 +1,12 @@
-# nut
+<!--
+title: "UPS/PDU monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/nut/README.md
+sidebar_label: "UPS/PDU"
+-->
-The plugin will collect UPS data for all UPSes configured in the system.
+# UPS/PDU monitoring with Netdata
+
+Collects UPS data for all power devices configured in the system.
The following charts will be created:
@@ -42,9 +48,17 @@ The following charts will be created:
- current temperature
-## configuration
+## Configuration
+
+Edit the `charts.d/nut.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/nut.conf
+```
-This is the internal default for `/etc/netdata/nut.conf`
+This is the internal default for `charts.d/nut.conf`
```sh
# a space separated list of UPS names
diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
index 933d3561d..60233361e 100644
--- a/collectors/charts.d.plugin/nut/nut.chart.sh
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
@@ -28,59 +28,59 @@ declare -A nut_ids=()
declare -A nut_names=()
nut_get_all() {
- run -t $nut_timeout upsc -l
+ run -t $nut_timeout upsc -l
}
nut_get() {
- run -t $nut_timeout upsc "$1"
+ run -t $nut_timeout upsc "$1"
- if [ "${nut_clients_chart}" -eq "1" ]; then
- printf "ups.connected_clients: "
- run -t $nut_timeout upsc -c "$1" | wc -l
- fi
+ if [ "${nut_clients_chart}" -eq "1" ]; then
+ printf "ups.connected_clients: "
+ run -t $nut_timeout upsc -c "$1" | wc -l
+ fi
}
nut_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- local x
-
- require_cmd upsc || return 1
-
- [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)"
-
- for x in $nut_ups; do
- nut_get "$x" >/dev/null
- # shellcheck disable=SC2181
- if [ $? -eq 0 ]; then
- if [ ! -z "${nut_names[${x}]}" ]; then
- nut_ids[$x]="$(fixid "${nut_names[${x}]}")"
- else
- nut_ids[$x]="$(fixid "$x")"
- fi
- continue
- fi
- error "cannot get information for NUT UPS '$x'."
- done
-
- if [ ${#nut_ids[@]} -eq 0 ]; then
- # shellcheck disable=SC2154
- error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
- return 1
- fi
-
- return 0
+ # this should return:
+ # - 0 to enable the chart
+ # - 1 to disable the chart
+
+ local x
+
+ require_cmd upsc || return 1
+
+ [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)"
+
+ for x in $nut_ups; do
+ nut_get "$x" > /dev/null
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ if [ -n "${nut_names[${x}]}" ]; then
+ nut_ids[$x]="$(fixid "${nut_names[${x}]}")"
+ else
+ nut_ids[$x]="$(fixid "$x")"
+ fi
+ continue
+ fi
+ error "cannot get information for NUT UPS '$x'."
+ done
+
+ if [ ${#nut_ids[@]} -eq 0 ]; then
+ # shellcheck disable=SC2154
+ error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf"
+ return 1
+ fi
+
+ return 0
}
nut_create() {
- # create the charts
- local x
+ # create the charts
+ local x
- for x in "${nut_ids[@]}"; do
- cat <<EOF
+ for x in "${nut_ids[@]}"; do
+ cat << EOF
CHART nut_$x.charge '' "UPS Charge" "percentage" ups nut.charge area $((nut_priority + 1)) $nut_update_every
DIMENSION battery_charge charge absolute 1 100
@@ -115,30 +115,30 @@ CHART nut_$x.temp '' "UPS Temperature" "temperature" ups nut.temperature line $(
DIMENSION temp temp absolute 1 100
EOF
- if [ "${nut_clients_chart}" = "1" ]; then
- cat <<EOF2
+ if [ "${nut_clients_chart}" = "1" ]; then
+ cat << EOF2
CHART nut_$x.clients '' "UPS Connected Clients" "clients" ups nut.clients area $((nut_priority + 9)) $nut_update_every
DIMENSION clients '' absolute 1 1
EOF2
- fi
+ fi
- done
+ done
- return 0
+ return 0
}
nut_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
+ # do all the work to collect / calculate the values
+ # for each dimension
+ # remember: KEEP IT SIMPLE AND SHORT
- local i x
- for i in "${!nut_ids[@]}"; do
- x="${nut_ids[$i]}"
- nut_get "$i" | awk "
+ local i x
+ for i in "${!nut_ids[@]}"; do
+ x="${nut_ids[$i]}"
+ nut_get "$i" | awk "
BEGIN {
battery_charge = 0;
battery_runtime = 0;
@@ -223,10 +223,10 @@ END {
print \"END\"
}
}"
- # shellcheck disable=2181
- [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it."
- done
+ # shellcheck disable=2181
+ [ $? -ne 0 ] && unset "nut_ids[$i]" && error "failed to get values for '$i', disabling it."
+ done
- [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." && return 1
- return 0
+ [ ${#nut_ids[@]} -eq 0 ] && error "no UPSes left active." && return 1
+ return 0
}
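
For reference, `nut_get()` above wraps `upsc`, whose output is plain `key: value` lines that the embedded awk program maps to chart dimensions. A sketch of that output and a hand-run extraction; the UPS name `myups` is a placeholder, and the values are invented:

```sh
# upsc prints one "key: value" pair per line, e.g.:
#
#   battery.charge: 100
#   battery.runtime: 1200
#   input.voltage: 230.1
#   ups.load: 23
#
# extracting a single value by hand, the way the awk program does in bulk:
upsc myups 2>/dev/null | awk -F': ' '$1 == "battery.charge" { print $2 }'
```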
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
index d41b41120..7575a1dad 100644
--- a/collectors/charts.d.plugin/opensips/README.md
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -1,7 +1,19 @@
-# OpenSIPS
+<!--
+title: "OpenSIPS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/opensips/README.md
+sidebar_label: "OpenSIPS"
+-->
-_Under construction_
+# OpenSIPS monitoring with Netdata
-Collects OpenSIPS metrics
+## Configuration
+
+Edit the `charts.d/opensips.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/opensips.conf
+```
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fopensips%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
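
The revised README no longer lists the module's tunables, which live in the script itself (`opensips_cmd`, `opensips_opts`, `opensips_timeout`, `opensips_update_every`). A hypothetical `charts.d/opensips.conf` override; the values shown are illustrative assumptions, not defaults taken from the diff:

```sh
# hypothetical overrides for the opensips charts.d module
opensips_cmd="/usr/sbin/opensipsctl"     # command to run, if not found in $PATH
opensips_opts="fifo get_statistics all"  # arguments passed to the command (assumed)
opensips_timeout=2
opensips_update_every=5
```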
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
index b42462d6d..8ff3e32ef 100644
--- a/collectors/charts.d.plugin/opensips/opensips.chart.sh
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
@@ -14,40 +14,40 @@ opensips_timeout=2
opensips_priority=80000
opensips_get_stats() {
- run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
- grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
- sed \
- -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
- -e "s|[[:space:]:-]\+|_|g" \
- -e "s|^|opensips_|g"
-
- local ret=$?
- [ $ret -ne 0 ] && echo "opensips_command_failed=1"
- return $ret
+ run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
+ grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
+ sed \
+ -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
+ -e "s|[[:space:]:-]\+|_|g" \
+ -e "s|^|opensips_|g"
+
+ local ret=$?
+ [ $ret -ne 0 ] && echo "opensips_command_failed=1"
+ return $ret
}
opensips_check() {
- # if the user did not provide an opensips_cmd
- # try to find it in the system
- if [ -z "$opensips_cmd" ]; then
- require_cmd opensipsctl || return 1
- fi
-
- # check once if the command works
- local x
- x="$(opensips_get_stats | grep "^opensips_core_")"
- # shellcheck disable=SC2181
- if [ ! $? -eq 0 ] || [ -z "$x" ]; then
- error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
- return 1
- fi
-
- return 0
+ # if the user did not provide an opensips_cmd
+ # try to find it in the system
+ if [ -z "$opensips_cmd" ]; then
+ require_cmd opensipsctl || return 1
+ fi
+
+ # check once if the command works
+ local x
+ x="$(opensips_get_stats | grep "^opensips_core_")"
+ # shellcheck disable=SC2181
+ if [ ! $? -eq 0 ] || [ -z "$x" ]; then
+ error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
+ return 1
+ fi
+
+ return 0
}
opensips_create() {
- # create the charts
- cat <<EOF
+ # create the charts
+ cat << EOF
CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every
DIMENSION dialog_active_dialogs active absolute 1 1
DIMENSION dialog_early_dialogs early absolute -1 1
@@ -141,91 +141,91 @@ CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragme
DIMENSION shmem_fragments fragments absolute 1 1
EOF
- return 0
+ return 0
}
opensips_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
-
- # 1. get the counters page from opensips
- # 2. sed to remove spaces; replace . with _; remove spaces around =; prepend each line with: local opensips_
- # 3. egrep lines starting with:
- # local opensips_client_http_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
- # local opensips_server_all_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
- # 4. then execute this as a script with eval
- # be very careful with eval:
- # prepare the script and always grep at the end the lines that are useful, so that
- # even if something goes wrong, no other code can be executed
-
- unset \
- opensips_dialog_active_dialogs \
- opensips_dialog_early_dialogs \
- opensips_usrloc_registered_users \
- opensips_usrloc_location_users \
- opensips_usrloc_location_contacts \
- opensips_usrloc_location_expires \
- opensips_registrar_accepted_regs \
- opensips_registrar_rejected_regs \
- opensips_tm_UAS_transactions \
- opensips_tm_UAC_transactions \
- opensips_core_rcv_requests \
- opensips_core_rcv_replies \
- opensips_core_fwd_requests \
- opensips_core_fwd_replies \
- opensips_core_drop_requests \
- opensips_core_drop_replies \
- opensips_core_err_requests \
- opensips_core_err_replies \
- opensips_core_bad_URIs_rcvd \
- opensips_core_unsupported_methods \
- opensips_core_bad_msg_hdr \
- opensips_tm_received_replies \
- opensips_tm_relayed_replies \
- opensips_tm_local_replies \
- opensips_tm_2xx_transactions \
- opensips_tm_3xx_transactions \
- opensips_tm_4xx_transactions \
- opensips_tm_5xx_transactions \
- opensips_tm_6xx_transactions \
- opensips_tm_inuse_transactions \
- opensips_sl_1xx_replies \
- opensips_sl_2xx_replies \
- opensips_sl_3xx_replies \
- opensips_sl_4xx_replies \
- opensips_sl_5xx_replies \
- opensips_sl_6xx_replies \
- opensips_sl_sent_replies \
- opensips_sl_sent_err_replies \
- opensips_sl_received_ACKs \
- opensips_dialog_processed_dialogs \
- opensips_dialog_expired_dialogs \
- opensips_dialog_failed_dialogs \
- opensips_net_waiting_udp \
- opensips_net_waiting_tcp \
- opensips_uri_positive_checks \
- opensips_uri_negative_checks \
- opensips_siptrace_traced_requests \
- opensips_siptrace_traced_replies \
- opensips_shmem_total_size \
- opensips_shmem_used_size \
- opensips_shmem_real_used_size \
- opensips_shmem_max_used_size \
- opensips_shmem_free_size \
- opensips_shmem_fragments
-
- opensips_command_failed=0
- eval "local $(opensips_get_stats)"
- # shellcheck disable=SC2181
- [ $? -ne 0 ] && return 1
-
- [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
-
- # write the result of the work.
- cat <<VALUESEOF
+ # the first argument to this function is the microseconds since last update
+ # pass this parameter to the BEGIN statement (see below).
+
+ # do all the work to collect / calculate the values
+ # for each dimension
+
+ # 1. get the counters page from opensips
+ # 2. sed to remove spaces; replace . with _; remove spaces around =; prepend each line with: local opensips_
+ # 3. egrep lines starting with:
+ # local opensips_client_http_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # local opensips_server_all_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
+ # 4. then execute this as a script with eval
+ # be very careful with eval:
+ # prepare the script and always grep at the end the lines that are useful, so that
+ # even if something goes wrong, no other code can be executed
+
+ unset \
+ opensips_dialog_active_dialogs \
+ opensips_dialog_early_dialogs \
+ opensips_usrloc_registered_users \
+ opensips_usrloc_location_users \
+ opensips_usrloc_location_contacts \
+ opensips_usrloc_location_expires \
+ opensips_registrar_accepted_regs \
+ opensips_registrar_rejected_regs \
+ opensips_tm_UAS_transactions \
+ opensips_tm_UAC_transactions \
+ opensips_core_rcv_requests \
+ opensips_core_rcv_replies \
+ opensips_core_fwd_requests \
+ opensips_core_fwd_replies \
+ opensips_core_drop_requests \
+ opensips_core_drop_replies \
+ opensips_core_err_requests \
+ opensips_core_err_replies \
+ opensips_core_bad_URIs_rcvd \
+ opensips_core_unsupported_methods \
+ opensips_core_bad_msg_hdr \
+ opensips_tm_received_replies \
+ opensips_tm_relayed_replies \
+ opensips_tm_local_replies \
+ opensips_tm_2xx_transactions \
+ opensips_tm_3xx_transactions \
+ opensips_tm_4xx_transactions \
+ opensips_tm_5xx_transactions \
+ opensips_tm_6xx_transactions \
+ opensips_tm_inuse_transactions \
+ opensips_sl_1xx_replies \
+ opensips_sl_2xx_replies \
+ opensips_sl_3xx_replies \
+ opensips_sl_4xx_replies \
+ opensips_sl_5xx_replies \
+ opensips_sl_6xx_replies \
+ opensips_sl_sent_replies \
+ opensips_sl_sent_err_replies \
+ opensips_sl_received_ACKs \
+ opensips_dialog_processed_dialogs \
+ opensips_dialog_expired_dialogs \
+ opensips_dialog_failed_dialogs \
+ opensips_net_waiting_udp \
+ opensips_net_waiting_tcp \
+ opensips_uri_positive_checks \
+ opensips_uri_negative_checks \
+ opensips_siptrace_traced_requests \
+ opensips_siptrace_traced_replies \
+ opensips_shmem_total_size \
+ opensips_shmem_used_size \
+ opensips_shmem_real_used_size \
+ opensips_shmem_max_used_size \
+ opensips_shmem_free_size \
+ opensips_shmem_fragments
+
+ opensips_command_failed=0
+ eval "local $(opensips_get_stats)"
+ # shellcheck disable=SC2181
+ [ $? -ne 0 ] && return 1
+
+ [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
+
+ # write the result of the work.
+ cat << VALUESEOF
BEGIN opensips.dialogs_active $1
SET dialog_active_dialogs = $opensips_dialog_active_dialogs
SET dialog_early_dialogs = $opensips_dialog_early_dialogs
@@ -320,5 +320,5 @@ SET shmem_fragments = $opensips_shmem_fragments
END
VALUESEOF
- return 0
+ return 0
}
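
The update function above relies on a guarded `eval`: the stats output is normalized to `name=value` lines and then filtered through a strict `grep -E` pattern, so the `eval` can only ever perform plain numeric assignments. A distilled, self-contained sketch of that pattern; the `get_stats` stub stands in for real `opensipsctl` output:

```sh
#!/usr/bin/env bash
# stand-in for the real stats command; emits "group:name = value" lines
get_stats() {
  printf 'core:rcv_requests = 10\ncore:rcv_replies = 7\n'
}

# normalize to name=value, prefix every line, then whitelist with grep
# before eval - malformed or malicious lines simply fail the grep
eval "$(get_stats |
  sed -e 's|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g' \
      -e 's|[[:space:]:-]\+|_|g' \
      -e 's|^|stats_|g' |
  grep -E '^stats_[a-zA-Z0-9_]+=[0-9]+$')"

echo "requests=$stats_core_rcv_requests replies=$stats_core_rcv_replies"
```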
diff --git a/collectors/charts.d.plugin/phpfpm/Makefile.inc b/collectors/charts.d.plugin/phpfpm/Makefile.inc
deleted file mode 100644
index 56bff6102..000000000
--- a/collectors/charts.d.plugin/phpfpm/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += phpfpm/phpfpm.chart.sh
-dist_chartsconfig_DATA += phpfpm/phpfpm.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
deleted file mode 100644
index f8976301b..000000000
--- a/collectors/charts.d.plugin/phpfpm/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# phpfpm
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/phpfpm) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
deleted file mode 100644
index b1edb2373..000000000
--- a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
+++ /dev/null
@@ -1,169 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-# Contributed by @safeie with PR #276
-
-# first, you need to enable the php-fpm status page in php-fpm.conf
-# second, you need to add a status location in nginx.conf
-# see https://easyengine.io/tutorials/php/fpm-status-page/
-
-declare -A phpfpm_urls=()
-declare -A phpfpm_curl_opts=()
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-phpfpm_update_every=
-phpfpm_priority=60000
-
-declare -a phpfpm_response=()
-phpfpm_pool=""
-phpfpm_start_time=""
-phpfpm_start_since=0
-phpfpm_accepted_conn=0
-phpfpm_listen_queue=0
-phpfpm_max_listen_queue=0
-phpfpm_listen_queue_len=0
-phpfpm_idle_processes=0
-phpfpm_active_processes=0
-phpfpm_total_processes=0
-phpfpm_max_active_processes=0
-phpfpm_max_children_reached=0
-phpfpm_slow_requests=0
-phpfpm_get() {
- local opts="${1}" url="${2}"
-
- # shellcheck disable=SC2207,2086
- phpfpm_response=($(run curl -Ss ${opts} "${url}"))
- # shellcheck disable=SC2181
- if [ $? -ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ]; then
- return 1
- fi
-
- if [[ ${phpfpm_response[0]} != "pool:" || ${phpfpm_response[2]} != "process" || ${phpfpm_response[5]} != "start" || ${phpfpm_response[12]} != "accepted" || ${phpfpm_response[15]} != "listen" || ${phpfpm_response[16]} != "queue:" || ${phpfpm_response[26]} != "idle" || ${phpfpm_response[29]} != "active" || ${phpfpm_response[32]} != "total" ]]; then
- error "invalid response from phpfpm status server: ${phpfpm_response[*]}"
- return 1
- fi
-
- phpfpm_pool="${phpfpm_response[1]}"
- phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}"
- phpfpm_start_since="${phpfpm_response[11]}"
- phpfpm_accepted_conn="${phpfpm_response[14]}"
- phpfpm_listen_queue="${phpfpm_response[17]}"
- phpfpm_max_listen_queue="${phpfpm_response[21]}"
- phpfpm_listen_queue_len="${phpfpm_response[25]}"
- phpfpm_idle_processes="${phpfpm_response[28]}"
- phpfpm_active_processes="${phpfpm_response[31]}"
- phpfpm_total_processes="${phpfpm_response[34]}"
- phpfpm_max_active_processes="${phpfpm_response[38]}"
- phpfpm_max_children_reached="${phpfpm_response[42]}"
- if [ "${phpfpm_response[43]}" == "slow" ]; then
- phpfpm_slow_requests="${phpfpm_response[45]}"
- else
- phpfpm_slow_requests="-1"
- fi
-
- if [[ -z ${phpfpm_pool} || -z ${phpfpm_start_time} || -z ${phpfpm_start_since} || -z ${phpfpm_accepted_conn} || -z ${phpfpm_listen_queue} || -z ${phpfpm_max_listen_queue} || -z ${phpfpm_listen_queue_len} || -z ${phpfpm_idle_processes} || -z ${phpfpm_active_processes} || -z ${phpfpm_total_processes} || -z ${phpfpm_max_active_processes} || -z ${phpfpm_max_children_reached} ]]; then
- error "empty values got from phpfpm status server: ${phpfpm_response[*]}"
- return 1
- fi
-
- return 0
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-phpfpm_check() {
- if [ ${#phpfpm_urls[@]} -eq 0 ]; then
- phpfpm_urls[local]="http://localhost/status"
- fi
-
- local m
- for m in "${!phpfpm_urls[@]}"; do
- phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- # shellcheck disable=SC2154
- error "cannot find status on URL '${phpfpm_urls[$m]}'. Please set phpfpm_urls[$m]='http://localhost/status' in $confd/phpfpm.conf"
- unset "phpfpm_urls[$m]"
- continue
- fi
- done
-
- if [ ${#phpfpm_urls[@]} -eq 0 ]; then
- error "no phpfpm servers found. Please set phpfpm_urls[name]='url' to whatever needed to get status to the phpfpm server, in $confd/phpfpm.conf"
- return 1
- fi
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- return 0
-}
-
-# _create is called once, to create the charts
-phpfpm_create() {
- local m
- for m in "${!phpfpm_urls[@]}"; do
- cat <<EOF
-CHART phpfpm_$m.connections '' "PHP-FPM Active Connections" "connections" phpfpm phpfpm.connections line $((phpfpm_priority + 1)) $phpfpm_update_every
-DIMENSION active '' absolute 1 1
-DIMENSION maxActive 'max active' absolute 1 1
-DIMENSION idle '' absolute 1 1
-
-CHART phpfpm_$m.requests '' "PHP-FPM Requests" "requests/s" phpfpm phpfpm.requests line $((phpfpm_priority + 2)) $phpfpm_update_every
-DIMENSION requests '' incremental 1 1
-
-CHART phpfpm_$m.performance '' "PHP-FPM Performance" "status" phpfpm phpfpm.performance line $((phpfpm_priority + 3)) $phpfpm_update_every
-DIMENSION reached 'max children reached' absolute 1 1
-EOF
- if [ $((phpfpm_slow_requests)) -ne -1 ]; then
- echo "DIMENSION slow 'slow requests' absolute 1 1"
- fi
- done
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-phpfpm_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- local m
- for m in "${!phpfpm_urls[@]}"; do
- phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- continue
- fi
-
- # write the result of the work.
- cat <<EOF
-BEGIN phpfpm_$m.connections $1
-SET active = $((phpfpm_active_processes))
-SET maxActive = $((phpfpm_max_active_processes))
-SET idle = $((phpfpm_idle_processes))
-END
-BEGIN phpfpm_$m.requests $1
-SET requests = $((phpfpm_accepted_conn))
-END
-BEGIN phpfpm_$m.performance $1
-SET reached = $((phpfpm_max_children_reached))
-EOF
- if [ $((phpfpm_slow_requests)) -ne -1 ]; then
- echo "SET slow = $((phpfpm_slow_requests))"
- fi
- echo "END"
- done
-
- return 0
-}
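
The word-index parsing in `phpfpm_get()` above is pinned to the fixed layout of php-fpm's plain-text status page. A sketch of that page (values invented), annotated with the indices the module reads:

```sh
# the plain-text status page, whitespace-split: "pool:" is word 0, the pool
# name word 1, "accepted conn:" value word 14, "idle processes:" value
# word 28, "slow requests:" value word 45 - matching phpfpm_get() above
#
#   pool:                 www
#   process manager:      dynamic
#   start time:           01/Jul/2016:12:00:00 +0000
#   start since:          555
#   accepted conn:        42
#   listen queue:         0
#   max listen queue:     0
#   listen queue len:     128
#   idle processes:       1
#   active processes:     1
#   total processes:      2
#   max active processes: 1
#   max children reached: 0
#   slow requests:        0
curl -Ss "http://localhost/status"
```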
diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.conf b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
deleted file mode 100644
index e4dd0231b..000000000
--- a/collectors/charts.d.plugin/phpfpm/phpfpm.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# first, you need to enable the php-fpm status page in php-fpm.conf
-# second, you need to add a status location in nginx.conf
-# see https://easyengine.io/tutorials/php/fpm-status-page/
-#phpfpm_urls[name]=""
-#phpfpm_curl_opts[name]=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#phpfpm_update_every=
-
-# the charts priority on the dashboard
-#phpfpm_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#phpfpm_retries=10
-
diff --git a/collectors/charts.d.plugin/postfix/Makefile.inc b/collectors/charts.d.plugin/postfix/Makefile.inc
deleted file mode 100644
index 6e148352d..000000000
--- a/collectors/charts.d.plugin/postfix/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += postfix/postfix.chart.sh
-dist_chartsconfig_DATA += postfix/postfix.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
deleted file mode 100644
index d9bf77f2f..000000000
--- a/collectors/charts.d.plugin/postfix/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# postfix
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/postfix) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-The plugin will collect the postfix queue size.
-
-It will create two charts:
-
-1. **queue size in emails**
-2. **queue size in KB**
-
-## configuration
-
-This is the internal default for `/etc/netdata/postfix.conf`
-
-```sh
-# the postqueue command
-# if empty, it will use the one found in the system path
-postfix_postqueue=
-
-# how frequently to collect queue size
-postfix_update_every=15
-```
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh
deleted file mode 100644
index ff59db9fe..000000000
--- a/collectors/charts.d.plugin/postfix/postfix.chart.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-# shellcheck shell=bash disable=SC1117
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# the postqueue command
-# if empty, it will use the one found in the system path
-postfix_postqueue=
-
-# how frequently to collect queue size
-postfix_update_every=15
-
-postfix_priority=60000
-
-postfix_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- # try to find the postqueue executable
- if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
- # shellcheck disable=SC2230
- postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)"
- fi
-
- if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then
- # shellcheck disable=SC2154
- error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf"
- return 1
- fi
-
- return 0
-}
-
-postfix_create() {
- cat <<EOF
-CHART postfix_local.qemails '' "Postfix Queue Emails" "emails" queue postfix.queued.emails line $((postfix_priority + 1)) $postfix_update_every
-DIMENSION emails '' absolute 1 1
-CHART postfix_local.qsize '' "Postfix Queue Emails Size" "emails size in KB" queue postfix.queued.size area $((postfix_priority + 2)) $postfix_update_every
-DIMENSION size '' absolute 1 1
-EOF
-
- return 0
-}
-
-postfix_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- # 1. execute postqueue -p
- # 2. get the line that begins with --
- # 3. match the 2 numbers on the line and output 2 lines like these:
- # local postfix_q_size=NUMBER
- # local postfix_q_emails=NUMBER
- # 4. then execute this as a script with eval
- #
- # be very careful with eval:
- # prepare the script and always egrep at the end the lines that are useful, so that
- # even if something goes wrong, no other code can be executed
- postfix_q_emails=0
- postfix_q_size=0
-
- eval "$(run "$postfix_postqueue" -p |
- grep "^--" |
- sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g" |
- grep -E "^local postfix_q_(emails|size)=[0-9]+$")"
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN postfix_local.qemails $1
-SET emails = $postfix_q_emails
-END
-BEGIN postfix_local.qsize $1
-SET size = $postfix_q_size
-END
-VALUESEOF
-
- return 0
-}
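
The `eval` in `postfix_update()` above extracts two numbers from the summary line that `postqueue -p` prints last. A self-contained sketch of that transformation, reusing the module's own sed expression (queue sizes invented):

```sh
# the last line of `postqueue -p` looks like:
#   -- 3 Kbytes in 5 Requests.
# the sed expression rewrites it into two guarded shell assignments:
echo "-- 3 Kbytes in 5 Requests." |
  sed -e "s/-- \([0-9]\+\) Kbytes in \([0-9]\+\) Requests.$/local postfix_q_size=\1\nlocal postfix_q_emails=\2/g"
# prints:
#   local postfix_q_size=3
#   local postfix_q_emails=5
```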
diff --git a/collectors/charts.d.plugin/postfix/postfix.conf b/collectors/charts.d.plugin/postfix/postfix.conf
deleted file mode 100644
index b77817bd6..000000000
--- a/collectors/charts.d.plugin/postfix/postfix.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the postqueue command
-# if empty, it will use the one found in the system path
-#postfix_postqueue=
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#postfix_update_every=15
-
-# the charts priority on the dashboard
-#postfix_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#postfix_retries=10
-
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
index a3fa9d20f..cee3f601c 100644
--- a/collectors/charts.d.plugin/sensors/README.md
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -1,7 +1,12 @@
-# sensors
+<!--
+title: "Linux machine sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/sensors/README.md
+-->
+
+# Linux machine sensors monitoring with Netdata
> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+> USE [THE PYTHON ONE](/collectors/python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
>
> Unlike the python one, this module can collect temperature on RPi.
@@ -24,9 +29,17 @@ The plugin will create Netdata charts for:
One chart for every sensor chip found and each of the above will be created.
-## configuration
+## Configuration
+
+Edit the `charts.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config charts.d/sensors.conf
+```
-This is the internal default for `/etc/netdata/sensors.conf`
+This is the internal default for `charts.d/sensors.conf`
```sh
# the directory the kernel keeps sensor data
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
deleted file mode 100644
index 831a04f73..000000000
--- a/collectors/charts.d.plugin/squid/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# squid
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/squid) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-The plugin will monitor a squid server.
-
-It will produce 4 charts:
-
-1. **Squid Client Bandwidth** in kbps
-
-- in
-- out
-- hits
-
-2. **Squid Client Requests** in requests/sec
-
-- requests
-- hits
-- errors
-
-3. **Squid Server Bandwidth** in kbps
-
-- in
-- out
-
-4. **Squid Server Requests** in requests/sec
-
-- requests
-- errors
-
-## autoconfig
-
-The plugin will automatically detect squid servers running on
-localhost, on ports 3128 or 8080.
-
-It will attempt to download URLs in the form:
-
-- `cache_object://HOST:PORT/counters`
-- `/squid-internal-mgr/counters`
-
-If any of these succeeds, it will use it.
-
-## configuration
-
-If you need to configure it by hand, create the file
-`/etc/netdata/squid.conf` with the following variables:
-
-- `squid_host=IP` the IP of the squid host
-- `squid_port=PORT` the port squid is listening on
-- `squid_url="URL"` the URL with the statistics to be fetched from squid
-- `squid_timeout=SECONDS` how long to wait for squid to respond
-- `squid_update_every=SECONDS` the frequency of the data collection
-
-Example `/etc/netdata/squid.conf`:
-
-```sh
-squid_host=127.0.0.1
-squid_port=3128
-squid_url="cache_object://127.0.0.1:3128/counters"
-squid_timeout=2
-squid_update_every=5
-```
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh
deleted file mode 100644
index ebddb32c6..000000000
--- a/collectors/charts.d.plugin/squid/squid.chart.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-# shellcheck shell=bash disable=SC2154
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-squid_host=
-squid_port=
-squid_url=
-squid_update_every=2
-squid_priority=60000
-
-squid_get_stats_internal() {
- local host="$1" port="$2" url="$3"
- run squidclient -h "$host" -p "$port" "$url"
-}
-
-squid_get_stats() {
- squid_get_stats_internal "$squid_host" "$squid_port" "$squid_url"
-}
-
-squid_autodetect() {
- local host="127.0.0.1" port url x
-
- for port in 3128 8080; do
- for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"; do
- x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests)
- if [ ! -z "$x" ]; then
- squid_host="$host"
- squid_port="$port"
- squid_url="$url"
- debug "found squid at '$host:$port' with url '$url'"
- return 0
- fi
- done
- done
-
- error "cannot find squid running in localhost. Please set squid_url='url' and squid_host='IP' and squid_port='PORT' in $confd/squid.conf"
- return 1
-}
-
-squid_check() {
- require_cmd squidclient || return 1
- require_cmd sed || return 1
- require_cmd egrep || return 1
-
- if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]; then
- squid_autodetect || return 1
- fi
-
- # check once if the url works
- local x
- x="$(squid_get_stats | grep client_http.requests)"
- # shellcheck disable=SC2181
- if [ ! $? -eq 0 ] || [ -z "$x" ]; then
- error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf"
- return 1
- fi
-
- return 0
-}
-
-squid_create() {
- # create the charts
- cat <<EOF
-CHART squid_local.clients_net '' "Squid Client Bandwidth" "kilobits / sec" clients squid.clients.net area $((squid_priority + 1)) $squid_update_every
-DIMENSION client_http_kbytes_in in incremental 8 1
-DIMENSION client_http_kbytes_out out incremental -8 1
-DIMENSION client_http_hit_kbytes_out hits incremental -8 1
-
-CHART squid_local.clients_requests '' "Squid Client Requests" "requests / sec" clients squid.clients.requests line $((squid_priority + 3)) $squid_update_every
-DIMENSION client_http_requests requests incremental 1 1
-DIMENSION client_http_hits hits incremental 1 1
-DIMENSION client_http_errors errors incremental -1 1
-
-CHART squid_local.servers_net '' "Squid Server Bandwidth" "kilobits / sec" servers squid.servers.net area $((squid_priority + 2)) $squid_update_every
-DIMENSION server_all_kbytes_in in incremental 8 1
-DIMENSION server_all_kbytes_out out incremental -8 1
-
-CHART squid_local.servers_requests '' "Squid Server Requests" "requests / sec" servers squid.servers.requests line $((squid_priority + 4)) $squid_update_every
-DIMENSION server_all_requests requests incremental 1 1
-DIMENSION server_all_errors errors incremental -1 1
-EOF
-
- return 0
-}
-
-squid_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- # 1. get the counters page from squid
- # 2. sed to remove spaces; replace . with _; remove spaces around =; prepend each line with: local squid_
- # 3. egrep lines starting with:
- # local squid_client_http_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
- # local squid_server_all_ then one or more of these a-z 0-9 _ then = and one or more of 0-9
- # 4. then execute this as a script with eval
- #
- # be very careful with eval:
- # prepare the script and always grep at the end the lines that are useful, so that
- # even if something goes wrong, no other code can be executed
-
- # shellcheck disable=SC1117
- eval "$(squid_get_stats |
- sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |
- grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$")"
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN squid_local.clients_net $1
-SET client_http_kbytes_in = $squid_client_http_kbytes_in
-SET client_http_kbytes_out = $squid_client_http_kbytes_out
-SET client_http_hit_kbytes_out = $squid_client_http_hit_kbytes_out
-END
-
-BEGIN squid_local.clients_requests $1
-SET client_http_requests = $squid_client_http_requests
-SET client_http_hits = $squid_client_http_hits
-SET client_http_errors = $squid_client_http_errors
-END
-
-BEGIN squid_local.servers_net $1
-SET server_all_kbytes_in = $squid_server_all_kbytes_in
-SET server_all_kbytes_out = $squid_server_all_kbytes_out
-END
-
-BEGIN squid_local.servers_requests $1
-SET server_all_requests = $squid_server_all_requests
-SET server_all_errors = $squid_server_all_errors
-END
-VALUESEOF
-
- return 0
-}
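
The squid counters page that `squid_update()` above parses is a flat list of `name = value` lines. A sketch of the input and what the sed/grep/eval pipeline turns it into (counter values invented, host/port are the module's autodetect defaults):

```sh
# the counters page contains lines such as:
#   client_http.requests = 12345
#   client_http.kbytes_in = 6789
# after the sed pass (collapse spaces, dots to underscores, prefix) and the
# grep whitelist, each becomes a safe local assignment:
#   local squid_client_http_requests=12345
#   local squid_client_http_kbytes_in=6789
squidclient -h 127.0.0.1 -p 3128 "cache_object://127.0.0.1:3128/counters"
```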
diff --git a/collectors/charts.d.plugin/squid/squid.conf b/collectors/charts.d.plugin/squid/squid.conf
deleted file mode 100644
index 19e928f25..000000000
--- a/collectors/charts.d.plugin/squid/squid.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-#squid_host=
-#squid_port=
-#squid_url=
-#squid_timeout=2
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#squid_update_every=2
-
-# the charts priority on the dashboard
-#squid_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#squid_retries=10
-
diff --git a/collectors/charts.d.plugin/tomcat/Makefile.inc b/collectors/charts.d.plugin/tomcat/Makefile.inc
deleted file mode 100644
index ef05b1953..000000000
--- a/collectors/charts.d.plugin/tomcat/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += tomcat/tomcat.chart.sh
-dist_chartsconfig_DATA += tomcat/tomcat.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
deleted file mode 100644
index 752332cfb..000000000
--- a/collectors/charts.d.plugin/tomcat/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# tomcat
-
-> THIS MODULE IS OBSOLETE.
-> USE [THE PYTHON ONE](../../python.d.plugin/tomcat) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
deleted file mode 100644
index 9ca75e63e..000000000
--- a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-# Contributed by @jgeromero with PR #277
-
-# Description: Tomcat netdata charts.d plugin
-# Author: Jorge Romero
-
-# the URL to download tomcat status info
-# usually http://localhost:8080/manager/status?XML=true
-tomcat_url=""
-tomcat_curl_opts=""
-
-# set tomcat username/password here
-tomcat_user=""
-tomcat_password=""
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-tomcat_update_every=
-
-tomcat_priority=60000
-
-# convert tomcat floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-tomcat_decimal_detail=1000000
-
-# used by volume chart to convert bytes to kB
-tomcat_decimal_kB_detail=1000
-
-tomcat_check() {
-
- require_cmd xmlstarlet || return 1
-
- # check if the url, username and password are set
- if [ -z "${tomcat_url}" ]; then
- error "tomcat url is unset or set to the empty string"
- return 1
- fi
- if [ -z "${tomcat_user}" ]; then
- # check backwards compatibility
- # shellcheck disable=SC2154
- if [ -z "${tomcatUser}" ]; then
- error "tomcat user is unset or set to the empty string"
- return 1
- else
- tomcat_user="${tomcatUser}"
- fi
- fi
- if [ -z "${tomcat_password}" ]; then
- # check backwards compatibility
- # shellcheck disable=SC2154
- if [ -z "${tomcatPassword}" ]; then
- error "tomcat password is unset or set to the empty string"
- return 1
- else
- tomcat_password="${tomcatPassword}"
- fi
- fi
-
- # check if we can get to tomcat's status page
- tomcat_get
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct."
- return 1
- fi
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- return 0
-}
-
-tomcat_get() {
- # collect tomcat values
- tomcat_port="$(
- IFS=/ read -ra a <<<"$tomcat_url"
- hostport=${a[2]}
- echo "${hostport#*:}"
- )"
- mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |
- run xmlstarlet sel \
- -t -m "/status/jvm/memory" -v @free \
- -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \
- -n -v @currentThreadsBusy \
- -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/requestInfo" -v @requestCount \
- -n -v @bytesSent -n -)
-
- tomcat_jvm_freememory="${lines[0]}"
- tomcat_threads="${lines[1]}"
- tomcat_threads_busy="${lines[2]}"
- tomcat_accesses="${lines[3]}"
- tomcat_volume="${lines[4]}"
-
- return 0
-}
-
-# _create is called once, to create the charts
-tomcat_create() {
- cat <<EOF
-CHART tomcat.accesses '' "tomcat requests" "requests/s" statistics tomcat.accesses area $((tomcat_priority + 8)) $tomcat_update_every
-DIMENSION accesses '' incremental
-CHART tomcat.volume '' "tomcat volume" "kB/s" volume tomcat.volume area $((tomcat_priority + 5)) $tomcat_update_every
-DIMENSION volume '' incremental divisor ${tomcat_decimal_kB_detail}
-CHART tomcat.threads '' "tomcat threads" "current threads" statistics tomcat.threads line $((tomcat_priority + 6)) $tomcat_update_every
-DIMENSION current '' absolute 1
-DIMENSION busy '' absolute 1
-CHART tomcat.jvm '' "JVM Free Memory" "MB" statistics tomcat.jvm area $((tomcat_priority + 8)) $tomcat_update_every
-DIMENSION jvm '' absolute 1 ${tomcat_decimal_detail}
-EOF
- return 0
-}
-
-# _update is called continuously, to collect the values
-tomcat_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- tomcat_get || return 1
-
- # write the result of the work.
- cat <<VALUESEOF
-BEGIN tomcat.accesses $1
-SET accesses = $((tomcat_accesses))
-END
-BEGIN tomcat.volume $1
-SET volume = $((tomcat_volume))
-END
-BEGIN tomcat.threads $1
-SET current = $((tomcat_threads))
-SET busy = $((tomcat_threads_busy))
-END
-BEGIN tomcat.jvm $1
-SET jvm = $((tomcat_jvm_freememory))
-END
-VALUESEOF
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/tomcat/tomcat.conf b/collectors/charts.d.plugin/tomcat/tomcat.conf
deleted file mode 100644
index e9f3eefa9..000000000
--- a/collectors/charts.d.plugin/tomcat/tomcat.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the URL to download tomcat status info
-# usually http://localhost:8080/manager/status?XML=true
-#tomcat_url=""
-#tomcat_curl_opts=""
-
-# set tomcat username/password here
-#tomcat_user=""
-#tomcat_password=""
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#tomcat_update_every=1
-
-# the charts priority on the dashboard
-#tomcat_priority=60000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#tomcat_retries=10
-
-# convert tomcat floating point values
-# to integer using this multiplier
-# this only affects precision - the values
-# will be in the proper units
-#tomcat_decimal_detail=1000000
-
-# used by volume chart to convert bytes to KB
-#tomcat_decimal_KB_detail=1000
diff --git a/collectors/checks.plugin/Makefile.in b/collectors/checks.plugin/Makefile.in
deleted file mode 100644
index 921efafda..000000000
--- a/collectors/checks.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/checks.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md
index 4510795d1..5f1a6b912 100644
--- a/collectors/checks.plugin/README.md
+++ b/collectors/checks.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "checks.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/checks.plugin/README.md
+-->
+
# checks.plugin
A debugging plugin (by default it is disabled)
diff --git a/collectors/cups.plugin/Makefile.in b/collectors/cups.plugin/Makefile.in
deleted file mode 100644
index 63dcac1f0..000000000
--- a/collectors/cups.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/cups.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cups.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/cups.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
index b8d56387d..373602dca 100644
--- a/collectors/cups.plugin/README.md
+++ b/collectors/cups.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "cups.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/cups.plugin/README.md
+-->
+
# cups.plugin
`cups.plugin` collects Common Unix Printing System (CUPS) metrics.
@@ -53,3 +58,5 @@ For each destination the plugin provides these charts:
- processing
At the moment only the job statuses pending, processing, and held are reported, because we do not have a scalable method to collect stopped, canceled, aborted, and completed jobs.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in
deleted file mode 100644
index c1d2d8bef..000000000
--- a/collectors/diskspace.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/diskspace.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
index 080140e40..a8b41c861 100644
--- a/collectors/diskspace.plugin/README.md
+++ b/collectors/diskspace.plugin/README.md
@@ -1,3 +1,9 @@
+<!--
+title: "diskspace.plugin"
+description: "Monitor the disk usage space of mounted disks in real-time with the Netdata Agent, plus preconfigured alarms for disks at risk of filling up."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/diskspace.plugin/README.md
+-->
+
# diskspace.plugin
This plugin monitors the disk space usage of mounted disks, under Linux. The plugin requires Netdata to have execute/search permissions on the mount point itself, as well as each component of the absolute path to the mount point.
@@ -19,7 +25,7 @@ By default, Netdata will enable monitoring metrics only when they are not zero.
# update every = 1
# check for new mount points every = 15
# exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
- # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl
+ # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs
# space usage for all disks = auto
# inodes usage for all disks = auto
```
@@ -32,6 +38,6 @@ Charts can be enabled/disabled for every mount separately:
# inodes usage = auto
```
-> for disks performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks)
+> for disk performance monitoring, see the `proc` plugin, [here](/collectors/proc.plugin/README.md#monitoring-disks)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
index eab607d84..4010e5759 100644
--- a/collectors/diskspace.plugin/plugin_diskspace.c
+++ b/collectors/diskspace.plugin/plugin_diskspace.c
@@ -5,7 +5,7 @@
#define PLUGIN_DISKSPACE_NAME "diskspace.plugin"
#define DELAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*"
-#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl"
+#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs"
#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace"
static struct mountinfo *disk_mountinfo_root = NULL;
@@ -254,7 +254,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
if(unlikely(!m->st_space)) {
m->do_space = CONFIG_BOOLEAN_YES;
- m->st_space = rrdset_find_bytype_localhost("disk_space", disk);
+ m->st_space = rrdset_find_active_bytype_localhost("disk_space", disk);
if(unlikely(!m->st_space)) {
char title[4096 + 1];
snprintfz(title, 4096, "Disk Space Usage for %s [%s]", family, mi->mount_source);
@@ -296,7 +296,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
if(unlikely(!m->st_inodes)) {
m->do_inodes = CONFIG_BOOLEAN_YES;
- m->st_inodes = rrdset_find_bytype_localhost("disk_inodes", disk);
+ m->st_inodes = rrdset_find_active_bytype_localhost("disk_inodes", disk);
if(unlikely(!m->st_inodes)) {
char title[4096 + 1];
snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", family, mi->mount_source);
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
new file mode 100644
index 000000000..1327d47a6
--- /dev/null
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ reset_netdata_trace.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ reset_netdata_trace.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ reset_netdata_trace.sh.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ ebpf.conf \
+ ebpf_kernel_reject_list.txt \
+ $(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
new file mode 100644
index 000000000..5ea3b4951
--- /dev/null
+++ b/collectors/ebpf.plugin/README.md
@@ -0,0 +1,400 @@
+<!--
+title: "eBPF monitoring with Netdata"
+description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your complex applications with per-second granularity."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md
+sidebar_label: "eBPF"
+-->
+
+# eBPF monitoring with Netdata
+
+Netdata's extended Berkeley Packet Filter (eBPF) collector monitors kernel-level metrics for file descriptors, virtual
+filesystem IO, and process management on Linux systems. You can use our eBPF collector to analyze how and when a process
+accesses files, when it makes system calls, whether it leaks memory or creates zombie processes, and more.
+
+Netdata's eBPF monitoring toolkit uses two custom eBPF programs. The default, called `entry`, monitors calls to a
+variety of kernel functions, such as `do_sys_open`, `__close_fd`, `vfs_read`, `vfs_write`, `_do_fork`, and more. The
+`return` program also monitors the return of each of those kernel functions to deliver more granular metrics about how your
+system and its applications interact with the Linux kernel.
+
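+To see what this style of instrumentation looks like, here is a minimal sketch using `bpftrace` (assumed to be
+installed; these are illustrative probes, not Netdata's actual eBPF programs) that attaches an entry probe and a
+return probe to one of the same functions, `vfs_read`:
+
+```bash
+# Count vfs_read() calls per process at entry, and count the calls
+# that returned an error at exit (press Ctrl-C to print the maps).
+sudo bpftrace -e '
+kprobe:vfs_read { @calls[comm] = count(); }
+kretprobe:vfs_read /(int64)retval < 0/ { @errors[comm] = count(); }'
+```
+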
+eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See our [guide on
+troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for configuration
+and troubleshooting tips.
+
+<figure>
+ <img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
+ <figcaption>An example of VFS charts made possible by the eBPF collector plugin.</figcaption>
+</figure>
+
+## Enable the collector on Linux
+
+**The eBPF collector is installed and enabled by default on most new installations of the Agent**. The eBPF collector
+does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md), but improved
+support is in active development.
+
+eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`,
+and all kernels on CentOS 7.6 or later.
+
+If your Agent is v1.22 or older, you may need to enable the collector yourself. See the [configuration](#configuration)
+section for details.
+
+## Charts
+
+The eBPF collector creates an **eBPF** menu in the Agent's dashboard along with three sub-menus: **File**, **VFS**, and
+**Process**. All the charts in this section update every second. The collector stores the actual value inside its
+process, but charts only show the difference between the values collected in the previous and current seconds.
+
+### File
+
+This group has two charts demonstrating how software interacts with the Linux kernel to open and close file descriptors.
+
+#### File descriptor
+
+This chart contains two dimensions that show the number of calls to the functions `do_sys_open` and `__close_fd`. Most
+software does not call these functions directly, but they sit behind the system calls `open(2)`, `openat(2)`,
+and `close(2)`.
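+
+As an illustrative sketch (not part of the collector), a trivial program like the following adds one call to each
+dimension of this chart every time it runs, because `open(2)` and `close(2)` are implemented on top of `do_sys_open`
+and `__close_fd`:
+
+```c
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(void) {
+    int fd = open("/etc/hostname", O_RDONLY); // open(2) reaches do_sys_open
+    if (fd != -1)
+        close(fd);                            // close(2) reaches __close_fd
+    return 0;
+}
+```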
+
+#### File error
+
+This chart shows the number of times some software tried and failed to open or close a file descriptor.
+
+### VFS
+
+A [virtual file system](https://en.wikipedia.org/wiki/Virtual_file_system) (VFS) is a layer on top of regular
+filesystems. The functions present inside this API are used for all filesystems, so it's possible the charts in this
+group won't show _all_ the actions that occurred on your system.
+
+#### Deleted objects
+
+This chart monitors calls for `vfs_unlink`. This function is responsible for removing objects from the file system.
+
+#### IO
+
+This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
+
+#### IO bytes
+
+This chart also monitors `vfs_read` and `vfs_write`, but instead shows the total of bytes read and written with these
+functions.
+
+The Agent displays the number of bytes written as negative because they are moving down to disk.
+
+#### IO errors
+
+The Agent counts and shows the number of instances where a running program experiences a read or write error.
+
+### Process
+
+For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
+following charts.
+
+#### Process thread
+
+Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
+system calls: `fork(2)`, `vfork(2)` and `clone(2)`. In turn, each of these system calls uses the function `_do_fork`. To
+generate this chart, the eBPF collector monitors `_do_fork` to populate the `process` dimension, and monitors
+`sys_clone` to identify threads.
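+
+As an illustrative sketch, the `process` dimension can be exercised from user space with a plain `fork(2)`, which
+reaches `_do_fork` inside the kernel:
+
+```c
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+int main(void) {
+    pid_t pid = fork();        // fork(2) uses _do_fork, counted in the process dimension
+    if (pid == 0)
+        _exit(0);              // the child ends via do_exit (see the Exit chart below)
+    if (pid > 0)
+        waitpid(pid, NULL, 0);
+    return 0;
+}
+```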
+
+#### Exit
+
+Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
+system that the task is finishing its work. The second step is to release the kernel information with the internal
+function `release_task`. The difference between the two dimensions can help you discover [zombie
+processes](https://en.wikipedia.org/wiki/Zombie_process).
+
+#### Task error
+
+The functions responsible for ending tasks do not return values, so this chart contains information about failures on
+process and thread creation.
+
+## Configuration
+
+Enable or disable the entire eBPF collector by editing `netdata.conf`.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config netdata.conf
+```
+
+To enable the collector, scroll down to the `[plugins]` section, ensure the relevant line references `ebpf` (not
+`ebpf_process`), and confirm that it is uncommented and set to `yes`.
+
+```conf
+[plugins]
+ ebpf = yes
+```
+
+You can also configure the eBPF collector's behavior by editing `ebpf.conf`.
+
+```bash
+cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
+./edit-config ebpf.conf
+```
+
+### `[global]`
+
+The `[global]` section defines settings for the whole eBPF collector.
+
+#### ebpf load mode
+
+The collector has two different eBPF programs. These programs monitor the same functions inside the kernel, but they
+monitor, process, and display different kinds of information.
+
+By default, this plugin uses the `entry` mode. Changing this mode can create significant overhead on your operating
+system, but can also offer valuable information if you are developing or debugging software. The `ebpf load mode`
+option accepts the following values:
+
+- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described
+ in the sections above, and does not show charts related to errors.
+- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+  new charts for the return of these functions, such as errors. Monitoring function returns can help you debug
+  software issues, such as failing to close file descriptors or creating zombie processes.
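+
+For example, to run every eBPF thread in `return` mode, set the load mode in the `[global]` section of `ebpf.conf`:
+
+```conf
+[global]
+    ebpf load mode = return
+```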
+
+#### Integration with `apps.plugin`
+
+The eBPF collector also creates charts for each running application through an integration with the
+[`apps.plugin`](/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
+interact with the Linux kernel.
+
+When the integration is enabled, your dashboard will also show the following charts using low-level Linux metrics:
+
+- eBPF file
+ - Number of calls to open files. (`apps.file_open`)
+ - Number of files closed. (`apps.file_closed`)
+ - Number of calls to open files that returned errors.
+ - Number of calls to close files that returned errors.
+- eBPF syscall
+ - Number of calls to delete files. (`apps.file_deleted`)
+ - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
+ - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
+ - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
+ - Number of calls to write a file that returned errors.
+ - Number of calls to read a file that returned errors.
+- eBPF process
+  - Number of processes created with `do_fork`. (`apps.process_create`)
+ - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
+ - Number of times that a process called `do_exit`. (`apps.task_close`)
+- eBPF net
+ - Number of bytes sent. (`apps.bandwidth_sent`)
+ - Number of bytes received. (`apps.bandwidth_recv`)
+
+If you want to _disable_ the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+`no`.
+
+```conf
+[global]
+ apps = yes
+```
+
+### `[ebpf programs]`
+
+The eBPF collector enables and runs the following eBPF programs by default:
+
+- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
+ When in `return` mode, it also creates charts showing errors when these operations are executed.
+- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+ bandwidth consumed by each.
+
+### `[network connections]`
+
+You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
+
+```conf
+[network connections]
+ maximum dimensions = 500
+    resolve hostnames = no
+ ports = 1-1024 !145 !domain
+ hostnames = !example.com
+ ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7
+```
+
+When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
+write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
+[simple patterns](/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to
+deny specific values, or an asterisk alone to match all values.
+
+In the above example, Netdata will collect metrics for all ports between 1 and 1024, with the exception of 145 and
+53 (`domain`).
+
+The following options are available:
+
+- `ports`: Define the destination ports for Netdata to monitor.
+- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
+ range of IPs, or use CIDR values. The default behavior is to only collect data for private IP addresses, but this
+ can be changed with the `ips` setting.
+
+By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions,
+they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing the
+`maximum dimensions` setting.
+
+By default, the dimensions for the traffic charts are created using the destination IPs of the sockets. This can be
+changed by setting `resolve hostnames = yes` and restarting Netdata. After this, Netdata will create dimensions using
+hostnames whenever it is possible to resolve IPs to their hostnames.
+
+### `[service name]`
+
+Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain the
+name for a particular service you use in your infrastructure, you will need to add it to the `[service name]` section.
+
+For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
+service in network connection charts, and thus see the name of the service instead of its port, define it:
+
+```conf
+[service name]
+ 19999 = Netdata
+```
+
+## Troubleshooting
+
+If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
+output.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+./ebpf.plugin
+```
+
+You can also use `grep` to search the Agent's `error.log` for messages related to eBPF monitoring.
+
+```bash
+grep -i ebpf /var/log/netdata/error.log
+```
+
+### Confirm kernel compatibility
+
+The eBPF collector only works on Linux systems and with specific Linux kernels. We support all kernels more recent than
+`4.11.0`, and all kernels on CentOS 7.6 or later.
+
+You can run our helper script to determine whether your system can support eBPF monitoring.
+
+```bash
+curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash
+```
+
+If this script returns no output, your system is ready to compile and run the eBPF collector.
+
+If you see a warning about a missing kernel configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL
+BPF_JIT`), you will need to recompile your kernel to support this configuration. The process of recompiling Linux
+kernels varies based on your distribution and version. Read the documentation for your system's distribution to learn
+more about the specific workflow for recompiling the kernel, ensuring that you set all the necessary configuration
+options listed above:
+
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+
+### Mount `debugfs` and `tracefs`
+
+The eBPF collector also requires both the `tracefs` and `debugfs` filesystems. Try mounting the `tracefs` and `debugfs`
+filesystems using the commands below:
+
+```bash
+sudo mount -t debugfs nodev /sys/kernel/debug
+sudo mount -t tracefs nodev /sys/kernel/tracing
+```
+
+If they are already mounted, you will see an error. You can also configure your system's `/etc/fstab` configuration to
+mount these filesystems on startup. More information can be found in the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
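+
+As a minimal sketch, assuming the standard mount points shown above, the corresponding `/etc/fstab` entries would look
+like this:
+
+```conf
+debugfs  /sys/kernel/debug    debugfs  defaults  0 0
+tracefs  /sys/kernel/tracing  tracefs  defaults  0 0
+```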
+
+## Performance
+
+Because eBPF monitoring is complex, we are evaluating the performance of this new collector in various real-world
+conditions, across various system loads, and when monitoring complex applications.
+
+Our [initial testing](https://github.com/netdata/netdata/issues/8195) shows the performance of the eBPF collector is
+nearly identical to our [apps.plugin collector](/collectors/apps.plugin/README.md), despite collecting and displaying
+much more sophisticated metrics. You can now use the eBPF collector to gather deeper insights without affecting the
+performance of your complex applications at any load.
+
+## SELinux
+
+When [SELinux](https://www.redhat.com/en/topics/linux/what-is-selinux) is enabled, it may prevent `ebpf.plugin` from
+starting correctly. Check the Agent's `error.log` file for errors like the ones below:
+
+```bash
+2020-06-14 15:32:08: ebpf.plugin ERROR : EBPF PROCESS : Cannot load program: /usr/libexec/netdata/plugins.d/pnetdata_ebpf_process.3.10.0.o (errno 13, Permission denied)
+2020-06-14 15:32:19: netdata ERROR : PLUGINSD[ebpf] : read failed: end of file (errno 9, Bad file descriptor)
+```
+
+You can also check for errors related to `ebpf.plugin` inside `/var/log/audit/audit.log`:
+
+```bash
+type=AVC msg=audit(1586260134.952:97): avc: denied { map_create } for pid=1387 comm="ebpf.pl" scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:unconfined_service_t:s0 tclass=bpf permissive=0
+type=SYSCALL msg=audit(1586260134.952:97): arch=c000003e syscall=321 success=no exit=-13 a0=0 a1=7ffe6b36f000 a2=70 a3=0 items=0 ppid=1135 pid=1387 auid=4294967295 uid=994 gid=990 euid=0 suid=0 fsuid=0 egid=990 sgid=990 fsgid=990 tty=(none) ses=4294967295 comm="ebpf_proc
+ess.pl" exe="/usr/libexec/netdata/plugins.d/ebpf.plugin" subj=system_u:system_r:unconfined_service_t:s0 key=(null)
+```
+
+If you see similar errors, you will have to adjust SELinux's policies to enable the eBPF collector.
+
+### Creation of bpf policies
+
+To enable `ebpf.plugin` to run on a distribution with SELinux enabled, it will be necessary to take the following
+actions.
+
+First, stop the Netdata Agent.
+
+```bash
+# systemctl stop netdata
+```
+
+Next, create a policy with the `audit.log` file you examined earlier.
+
+```bash
+# grep ebpf.plugin /var/log/audit/audit.log | audit2allow -M netdata_ebpf
+```
+
+This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`.
+
+Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of
+the `netdata_ebpf.te` file.
+
+```conf
+module netdata_ebpf 1.0;
+require {
+ type unconfined_service_t;
+ class bpf { map_create map_read map_write prog_load prog_run };
+}
+#============= unconfined_service_t ==============
+allow unconfined_service_t self:bpf { map_create map_read map_write prog_load prog_run };
+```
+
+Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies:
+
+```bash
+# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
+# semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod
+```
+
+Finally, you can load the new policy and start the Netdata agent again:
+
+```bash
+# semodule -i netdata_ebpf.pp
+# systemctl start netdata
+```
+
+## Lockdown
+
+Beginning with [version 5.4](https://www.zdnet.com/article/linux-to-get-kernel-lockdown-feature/), the Linux kernel has
+a feature called "lockdown," which may affect `ebpf.plugin` depending on how the kernel was compiled. The following table
+shows how the lockdown module impacts `ebpf.plugin` based on the selected options:
+
+| Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? |
+|:------------------------- |:--------------------------------- |:--------------------- |:-------------------------------- |
+| Yes | No | None | Yes |
+| Yes | Yes | None | Yes |
+| Yes | Yes | Integrity | Yes |
+| Yes | Yes | Confidentiality | No |
+
+If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries
+required to run `ebpf.plugin`.
+
+## Cleaning `kprobe_events`
+
+The eBPF collector adds entries to the file `/sys/kernel/debug/tracing/kprobe_events`, and cleans them on exit, unless
+another process prevents it. If you need to clean the eBPF entries safely, you can manually run the script
+`/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Febpf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
new file mode 100644
index 000000000..56e084e97
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -0,0 +1,1953 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <ifaddrs.h>
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED BY NETDATA
+ *
+ *****************************************************************/
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result)
+{
+ UNUSED(variable);
+ UNUSED(hash);
+ UNUSED(rc);
+ UNUSED(result);
+ return 0;
+};
+
+void send_statistics(const char *action, const char *action_result, const char *action_data)
+{
+ UNUSED(action);
+ UNUSED(action_result);
+ UNUSED(action_data);
+}
+
+// callbacks required by popen()
+void signals_block(void){};
+void signals_unblock(void){};
+void signals_reset(void){};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret)
+{
+ exit(ret);
+}
+
+// ----------------------------------------------------------------------
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+char *ebpf_plugin_dir = PLUGINS_DIR;
+char *ebpf_user_config_dir = CONFIG_DIR;
+char *ebpf_stock_config_dir = LIBCONFIG_DIR;
+static char *ebpf_configured_log_dir = LOG_DIR;
+
+int update_every = 1;
+static int thread_finished = 0;
+int close_ebpf_plugin = 0;
+struct config collector_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+int running_on_kernel = 0;
+char kernel_string[64];
+int ebpf_nprocs;
+static int isrh;
+uint32_t finalized_threads = 1;
+
+pthread_mutex_t lock;
+pthread_mutex_t collect_data_mutex;
+pthread_cond_t collect_data_cond_var;
+
+ebpf_module_t ebpf_modules[] = {
+ { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+ { .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+ { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
+ .global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0 },
+};
+
+// Link with apps.plugin
+ebpf_process_stat_t *global_process_stat = NULL;
+
+//Network viewer
+ebpf_network_viewer_options_t network_viewer_opt;
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
+ *
+ *****************************************************************/
+
+/**
+ * Clean port Structure
+ *
+ * Clean the allocated list.
+ *
+ * @param clean the list that will be cleaned
+ */
+void clean_port_structure(ebpf_network_viewer_port_list_t **clean)
+{
+ ebpf_network_viewer_port_list_t *move = *clean;
+ while (move) {
+ ebpf_network_viewer_port_list_t *next = move->next;
+ freez(move->value);
+ freez(move);
+
+ move = next;
+ }
+ *clean = NULL;
+}
+
+/**
+ * Clean IP structure
+ *
+ * Clean the allocated list.
+ *
+ * @param clean the list that will be cleaned
+ */
+static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
+{
+ ebpf_network_viewer_ip_list_t *move = *clean;
+ while (move) {
+ ebpf_network_viewer_ip_list_t *next = move->next;
+ freez(move);
+
+ move = next;
+ }
+ *clean = NULL;
+}
+
+/**
+ * Clean Loaded Events
+ *
+ * This function cleans the events previously loaded on Linux.
+void clean_loaded_events()
+{
+ int event_pid;
+ for (event_pid = 0; ebpf_modules[event_pid].probes; event_pid++)
+ clean_kprobe_events(NULL, (int)ebpf_modules[event_pid].thread_id, ebpf_modules[event_pid].probes);
+}
+ */
+
+/**
+ * Close the collector gracefully
+ *
+ * @param sig is the signal number used to close the collector
+ */
+static void ebpf_exit(int sig)
+{
+ close_ebpf_plugin = 1;
+
+    // If the threads have not finished yet and execution moves past this point, the collector will crash.
+ if (!thread_finished) {
+ return;
+ }
+
+ freez(global_process_stat);
+
+ /*
+ int ret = fork();
+ if (ret < 0) // error
+ error("Cannot fork(), so I won't be able to clean %skprobe_events", NETDATA_DEBUGFS);
+ else if (!ret) { // child
+ int i;
+ for (i = getdtablesize(); i >= 0; --i)
+ close(i);
+
+ int fd = open("/dev/null", O_RDWR, 0);
+ if (fd != -1) {
+ dup2(fd, STDIN_FILENO);
+ dup2(fd, STDOUT_FILENO);
+ dup2(fd, STDERR_FILENO);
+ }
+
+ if (fd > 2)
+ close(fd);
+
+ int sid = setsid();
+ if (sid >= 0) {
+            debug(D_EXIT, "Waiting for the parent %d to die", getpid());
+            sleep_usec(200000); // Sleep 200 milliseconds so the parent dies first.
+ clean_loaded_events();
+ } else {
+ error("Cannot become session id leader, so I won't try to clean kprobe_events.\n");
+ }
+ } else { // parent
+ exit(0);
+ }
+ */
+
+ exit(sig);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Get a value from a structure.
+ *
+ * @param basis  the base address of the structure
+ * @param offset the offset of the data you want to access.
+ *
+ * @return It returns the absolute value read at the given offset; the stored value is reset to zero.
+ */
+collected_number get_value_from_structure(char *basis, size_t offset)
+{
+ collected_number *value = (collected_number *)(basis + offset);
+
+ collected_number ret = (collected_number)llabs(*value);
+    // this reset is necessary to avoid keeping a constant value while the process is not executing a task
+ *value = 0;
+
+ return ret;
+}
+
+/**
+ * Write begin command on standard output
+ *
+ * @param family the chart family name
+ * @param name the chart name
+ */
+void write_begin_chart(char *family, char *name)
+{
+ printf("BEGIN %s.%s\n", family, name);
+}
+
+/**
+ * Write END command on stdout.
+ */
+inline void write_end_chart()
+{
+ printf("END\n");
+}
+
+/**
+ * Write set command on standard output
+ *
+ * @param dim the dimension name
+ * @param value the value for the dimension
+ */
+void write_chart_dimension(char *dim, long long value)
+{
+ int ret = printf("SET %s = %lld\n", dim, value);
+ UNUSED(ret);
+}
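+
+/*
+ * Illustrative example: together, the three functions above emit one chart
+ * update using the Netdata plugin text protocol (the chart and dimension
+ * names below are hypothetical):
+ *
+ *   BEGIN ebpf.process_thread
+ *   SET process = 42
+ *   SET thread = 7
+ *   END
+ */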
+
+/**
+ * Call the necessary functions to create a chart.
+ *
+ * @param name the chart name
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ * @param end the number of values that will be written on standard output
+ */
+void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
+{
+ write_begin_chart(family, name);
+
+ uint32_t i = 0;
+ while (move && i < end) {
+ write_chart_dimension(move->name, move->ncall);
+
+ move = move->next;
+ i++;
+ }
+
+ write_end_chart();
+}
+
+/**
+ * Call the necessary functions to create a chart.
+ *
+ * @param name the chart name
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ * @param end the number of values that will be written on standard output
+ */
+void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end)
+{
+ write_begin_chart(family, name);
+
+ int i = 0;
+ while (move && i < end) {
+ write_chart_dimension(move->name, move->nerr);
+
+ move = move->next;
+ i++;
+ }
+
+ write_end_chart();
+}
+
+/**
+ * Write an IO chart with a write and a read dimension.
+ *
+ * @param chart  the chart name
+ * @param family the chart family
+ * @param dwrite the name of the write dimension
+ * @param dread  the name of the read dimension
+ * @param pvc    the structure with the values that will be published
+ */
+void write_io_chart(char *chart, char *family, char *dwrite, char *dread, netdata_publish_vfs_common_t *pvc)
+{
+ write_begin_chart(family, chart);
+
+ write_chart_dimension(dwrite, (long long)pvc->write);
+ write_chart_dimension(dread, (long long)pvc->read);
+
+ write_end_chart();
+}
+
+/**
+ * Write chart cmd on standard output
+ *
+ * @param type the chart type
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on dashboard
+ * @param charttype the chart type
+ * @param order the chart order
+ */
+void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family, char *charttype, int order)
+{
+ printf("CHART %s.%s '' '%s' '%s' '%s' '' %s %d %d\n",
+ type,
+ id,
+ title,
+ units,
+ family,
+ charttype,
+ order,
+ update_every);
+}
+
+/**
+ * Write the dimension command on standard output
+ *
+ * @param n the dimension name
+ * @param d the dimension information
+ */
+void ebpf_write_global_dimension(char *n, char *d)
+{
+ printf("DIMENSION %s %s absolute 1 1\n", n, d);
+}
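+
+/*
+ * Illustrative example: ebpf_write_chart_cmd() followed by
+ * ebpf_write_global_dimension() produces output such as (names are
+ * hypothetical):
+ *
+ *   CHART ebpf.file_descriptor '' 'Open and close calls' 'calls/s' 'file' '' line 20001 1
+ *   DIMENSION open do_sys_open absolute 1 1
+ */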
+
+/**
+ * Call ebpf_write_global_dimension to create the dimensions for a specific chart
+ *
+ * @param ptr a pointer to a structure of the type netdata_publish_syscall_t
+ * @param end the number of dimensions for the structure ptr
+ */
+void ebpf_create_global_dimension(void *ptr, int end)
+{
+ netdata_publish_syscall_t *move = ptr;
+
+ int i = 0;
+ while (move && i < end) {
+ ebpf_write_global_dimension(move->name, move->dimension);
+
+ move = move->next;
+ i++;
+ }
+}
+
+/**
+ * Call write_chart_cmd to create the charts
+ *
+ * @param type the chart type
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the axis label
+ * @param family the group name used to attach the chart on dashboard
+ * @param order the order number of the specified chart
+ * @param ncd a pointer to a function called to create dimensions
+ * @param move a pointer for a structure that has the dimensions
+ * @param end number of dimensions for the chart created
+ */
+void ebpf_create_chart(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ void (*ncd)(void *, int),
+ void *move,
+ int end)
+{
+ ebpf_write_chart_cmd(type, id, title, units, family, "line", order);
+
+ ncd(move, end);
+}
+
+/**
+ * Create charts on apps submenu
+ *
+ * @param id the chart id
+ * @param title the chart title.
+ * @param units the value displayed on the vertical axis.
+ * @param family Submenu that the chart will be attached on dashboard.
+ * @param order the chart order
+ * @param root structure used to create the dimensions.
+ */
+void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, int order, struct target *root)
+{
+ struct target *w;
+ ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, "stacked", order);
+
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed))
+ fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name);
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO DEFINE OPTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Define labels used to generate charts
+ *
+ * @param is   a structure with information about the number of calls made for a function.
+ * @param pio  a structure used to generate charts.
+ * @param dim  a pointer for the dimension names
+ * @param name a pointer for the vector with the names of the functions.
+ * @param end  the number of elements in the previous four arguments.
+ */
+void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *pio, char **dim, char **name, int end)
+{
+ int i;
+
+ netdata_syscall_stat_t *prev = NULL;
+ netdata_publish_syscall_t *publish_prev = NULL;
+ for (i = 0; i < end; i++) {
+ if (prev) {
+ prev->next = &is[i];
+ }
+ prev = &is[i];
+
+ pio[i].dimension = dim[i];
+ pio[i].name = name[i];
+ if (publish_prev) {
+ publish_prev->next = &pio[i];
+ }
+ publish_prev = &pio[i];
+ }
+}
+
+/**
+ * Define the thread mode for all eBPF programs.
+ *
+ * @param lmode the mode that will be used for them.
+ */
+static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].mode = lmode;
+ }
+}
+
+/**
+ * Enable specific charts selected by user.
+ *
+ * @param em the structure that will be changed
+ * @param enable when this flag is 0, the apps charts are also enabled.
+ */
+static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int enable)
+{
+ em->enabled = 1;
+ if (!enable) {
+ em->apps_charts = 1;
+ }
+ em->global_charts = 1;
+}
+
+/**
+ * Enable all charts
+ *
+ * @param apps what is the current status of apps
+ */
+static inline void ebpf_enable_all_charts(int apps)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_enable_specific_chart(&ebpf_modules[i], apps);
+ }
+}
+
+/**
+ * Enable the specified chart group
+ *
+ * @param idx the index of the ebpf_modules entry being enabled
+ * @param disable_apps whether the apps charts should stay disabled
+ */
+static inline void ebpf_enable_chart(int idx, int disable_apps)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ if (i == idx) {
+ ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps);
+ break;
+ }
+ }
+}
+
+/**
+ * Disable APPs
+ *
+ * Disable the apps charts, loading only the global charts.
+ */
+static inline void ebpf_disable_apps()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].apps_charts = 0;
+ }
+}
+
+/**
+ * Print help on standard error so the user knows how to use the collector.
+ */
+void ebpf_print_help()
+{
+ const time_t t = time(NULL);
+ struct tm ct;
+ struct tm *test = localtime_r(&t, &ct);
+ int year;
+ if (test)
+ year = ct.tm_year;
+ else
+ year = 0;
+
+ fprintf(stderr,
+ "\n"
+ " Netdata ebpf.plugin %s\n"
+ " Copyright (C) 2016-%d Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS set the data collection frequency.\n"
+ "\n"
+ " --help or -h show this help.\n"
+ "\n"
+ " --version or -v show software version.\n"
+ "\n"
+ " --global or -g disable charts per application.\n"
+ "\n"
+ " --all or -a Enable all chart groups (global and apps), unless -g is also given.\n"
+ "\n"
+ " --net or -n Enable network viewer charts.\n"
+ "\n"
+ " --process or -p Enable charts related to process run time.\n"
+ "\n"
+ " --return or -r Run the collector in return mode.\n"
+ "\n",
+ VERSION,
+ (year >= 116) ? year + 1900 : 2020);
+}
+
+/*****************************************************************
+ *
+ *  AUXILIARY FUNCTIONS USED DURING INITIALIZATION
+ *
+ *****************************************************************/
+
+/**
+ * Is ip inside the range
+ *
+ * Check if the IP is inside an IP range
+ *
+ * @param rfirst the first ip address of the range
+ * @param rlast the last ip address of the range
+ * @param cmpfirst the first ip to compare
+ * @param cmplast the last ip to compare
+ * @param family the IP family
+ *
+ * @return It returns 1 if the IP is inside the range and 0 otherwise
+ */
+static int is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
+ union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
+{
+ if (family == AF_INET) {
+ if (ntohl(rfirst->addr32[0]) <= ntohl(cmpfirst->addr32[0]) &&
+ ntohl(rlast->addr32[0]) >= ntohl(cmplast->addr32[0]))
+ return 1;
+ } else {
+ if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 1;
+ }
+
+ }
+ return 0;
+}
+
+
+/**
+ * Fill IP list
+ *
+ * @param out   a pointer to the link list.
+ * @param in    the structure that will be linked.
+ * @param table the name of the table, used only for debug messages.
+ */
+static inline void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table)
+{
+#ifndef NETDATA_INTERNAL_CHECKS
+ UNUSED(table);
+#endif
+ if (likely(*out)) {
+ ebpf_network_viewer_ip_list_t *move = *out, *store = *out;
+ while (move) {
+ if (in->ver == move->ver && is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
+ info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.",
+ in->value, move->value);
+ freez(in->value);
+ freez(in);
+ return;
+ }
+ store = move;
+ move = move->next;
+ }
+
+ store->next = in;
+ } else {
+ *out = in;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char first[512], last[512];
+ if (in->ver == AF_INET) {
+ if (inet_ntop(AF_INET, in->first.addr8, first, INET_ADDRSTRLEN) &&
+ inet_ntop(AF_INET, in->last.addr8, last, INET_ADDRSTRLEN))
+ info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
+ first, last,
+ (*out == network_viewer_opt.included_ips)?"included":"excluded",
+ table);
+ } else {
+ if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) &&
+ inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN))
+ info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
+ first, last,
+ (*out == network_viewer_opt.included_ips)?"included":"excluded",
+ table);
+ }
+#endif
+}
+
+/**
+ * Read Local Ports
+ *
+ * Parse /proc/net/{tcp,udp} and get the ports on which Linux is listening.
+ *
+ * @param filename the proc file to parse.
+ * @param proto is the magic number associated to the protocol file we are reading.
+ */
+static void read_local_ports(char *filename, uint8_t proto)
+{
+ procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return;
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return;
+
+ size_t lines = procfile_lines(ff), l;
+ for(l = 0; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ // This is header or end of file
+ if (unlikely(words < 14))
+ continue;
+
+ // https://elixir.bootlin.com/linux/v5.7.8/source/include/net/tcp_states.h
+ // 0A = TCP_LISTEN
+ if (strcmp("0A", procfile_lineword(ff, l, 5)))
+ continue;
+
+ // Read local port
+ uint16_t port = (uint16_t)strtol(procfile_lineword(ff, l, 2), NULL, 16);
+ update_listen_table(htons(port), proto);
+ }
+
+ procfile_close(ff);
+}
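+
+/*
+ * Illustrative /proc/net/tcp line parsed above (with the " \t:" separators,
+ * word 2 is the local port in hex and word 5 is the socket state):
+ *
+ *   0: 00000000:4E1F 00000000:0000 0A 00000000:00000000 00:00000000 00000000 ...
+ *
+ * Here 0x4E1F (19999) is in state 0A (TCP_LISTEN), so it is added to the
+ * listen table.
+ */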
+
+/**
+ * Read Local Addresses
+ *
+ * Read the local address from the interfaces.
+ */
+static void read_local_addresses()
+{
+ struct ifaddrs *ifaddr, *ifa;
+ if (getifaddrs(&ifaddr) == -1) {
+        error("Cannot get the local IP addresses, it is not possible to separate inbound from outbound connections");
+ return;
+ }
+
+    char *notext = "No text representation";
+ for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
+ if (ifa->ifa_addr == NULL)
+ continue;
+
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6))
+ continue;
+
+ ebpf_network_viewer_ip_list_t *w = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
+
+ int family = ifa->ifa_addr->sa_family;
+ w->ver = (uint8_t) family;
+ char text[INET6_ADDRSTRLEN];
+ if (family == AF_INET) {
+ struct sockaddr_in *in = (struct sockaddr_in*) ifa->ifa_addr;
+
+ w->first.addr32[0] = in->sin_addr.s_addr;
+ w->last.addr32[0] = in->sin_addr.s_addr;
+
+ if (inet_ntop(AF_INET, w->first.addr8, text, INET_ADDRSTRLEN)) {
+ w->value = strdupz(text);
+ w->hash = simple_hash(text);
+ } else {
+ w->value = strdupz(notext);
+ w->hash = simple_hash(notext);
+ }
+ } else {
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6*) ifa->ifa_addr;
+
+ memcpy(w->first.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
+ memcpy(w->last.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
+
+        if (inet_ntop(AF_INET6, w->first.addr8, text, INET6_ADDRSTRLEN)) {
+ w->value = strdupz(text);
+ w->hash = simple_hash(text);
+ } else {
+ w->value = strdupz(notext);
+ w->hash = simple_hash(notext);
+ }
+ }
+
+ fill_ip_list((family == AF_INET)?&network_viewer_opt.ipv4_local_ip:&network_viewer_opt.ipv6_local_ip,
+ w,
+ "selector");
+ }
+
+ freeifaddrs(ifaddr);
+}
+
+/**
+ * Start Pthread Variables
+ *
+ * This function initializes all pthread variables.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+int ebpf_start_pthread_variables()
+{
+ pthread_mutex_init(&lock, NULL);
+ pthread_mutex_init(&collect_data_mutex, NULL);
+
+ if (pthread_cond_init(&collect_data_cond_var, NULL)) {
+ thread_finished++;
+ error("Cannot start conditional variable to control Apps charts.");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Allocate the vectors used for all threads.
+ */
+static void ebpf_allocate_common_vectors()
+{
+ all_pids = callocz((size_t)pid_max, sizeof(struct pid_stat *));
+ global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t));
+}
+
+/**
+ * Fill the ebpf_data structure with default values
+ *
+ * @param ef the pointer to set default values
+ */
+void fill_ebpf_data(ebpf_data_t *ef)
+{
+ memset(ef, 0, sizeof(ebpf_data_t));
+ ef->kernel_string = kernel_string;
+ ef->running_on_kernel = running_on_kernel;
+ ef->map_fd = callocz(EBPF_MAX_MAPS, sizeof(int));
+ ef->isrh = isrh;
+}
+
+/**
+ * Define how to load the ebpf programs
+ *
+ * @param ptr the option given by users
+ */
+static inline void how_to_load(char *ptr)
+{
+ if (!strcasecmp(ptr, "return"))
+ ebpf_set_thread_mode(MODE_RETURN);
+ else if (!strcasecmp(ptr, "entry"))
+ ebpf_set_thread_mode(MODE_ENTRY);
+ else
+ error("the option %s for \"ebpf load mode\" is not a valid option.", ptr);
+}
+
+/**
+ * Fill Port list
+ *
+ * @param out a pointer to the link list.
+ * @param in the structure that will be linked.
+ */
+static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in)
+{
+ if (likely(*out)) {
+ ebpf_network_viewer_port_list_t *move = *out, *store = *out;
+ uint16_t first = ntohs(in->first);
+ uint16_t last = ntohs(in->last);
+ while (move) {
+ uint16_t cmp_first = ntohs(move->first);
+ uint16_t cmp_last = ntohs(move->last);
+ if (cmp_first <= first && first <= cmp_last &&
+ cmp_first <= last && last <= cmp_last ) {
+ info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.",
+ first, last, cmp_first, cmp_last);
+ freez(in->value);
+ freez(in);
+ return;
+ } else if (first <= cmp_first && cmp_first <= last &&
+ first <= cmp_last && cmp_last <= last) {
+ info("The range (%u, %u) is bigger than previous range (%u, %u) already inserted, the previous will be ignored.",
+ first, last, cmp_first, cmp_last);
+ freez(move->value);
+ move->value = in->value;
+ move->first = in->first;
+ move->last = in->last;
+ freez(in);
+ return;
+ }
+
+ store = move;
+ move = move->next;
+ }
+
+ store->next = in;
+ } else {
+ *out = in;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    info("Adding values %s (%u, %u) to %s port list used on network viewer",
+ in->value, ntohs(in->first), ntohs(in->last),
+ (*out == network_viewer_opt.included_port)?"included":"excluded");
+#endif
+}
+
+/**
+ * Parse port list
+ *
+ * Parse a port or a range of ports and add it to the link list.
+ *
+ * @param out a pointer to store the link list
+ * @param range the range informed by the user.
+ */
+static void parse_port_list(void **out, char *range)
+{
+ int first, last;
+ ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
+
+ char *copied = strdupz(range);
+ if (*range == '*' && *(range+1) == '\0') {
+ first = 1;
+ last = 65535;
+
+ clean_port_structure(list);
+ goto fillenvpl;
+ }
+
+ char *end = range;
+    // Move forward until a separator is found
+ while (*end && *end != ':' && *end != '-') end++;
+
+    // It has a range
+ if (likely(*end)) {
+ *end++ = '\0';
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied);
+ freez(copied);
+ return;
+ }
+ last = str2i((const char *)end);
+ } else {
+ last = 0;
+ }
+
+ first = str2i((const char *)range);
+ if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) {
+ info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied);
+ freez(copied);
+ return;
+ }
+
+ if (!last)
+ last = first;
+
+ if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) {
+ info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied);
+ freez(copied);
+ return;
+ }
+
+ if (first > last) {
+ info("The specified order %s is wrong, the smallest value is always the first, it will be ignored!", copied);
+ freez(copied);
+ return;
+ }
+
+ ebpf_network_viewer_port_list_t *w;
+fillenvpl:
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->value = copied;
+ w->hash = simple_hash(copied);
+ w->first = (uint16_t)htons((uint16_t)first);
+ w->last = (uint16_t)htons((uint16_t)last);
+ w->cmp_first = (uint16_t)first;
+ w->cmp_last = (uint16_t)last;
+
+ fill_port_list(list, w);
+}
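+
+/*
+ * Illustrative inputs accepted by parse_port_list():
+ *   "19999"    a single port
+ *   "1-1024"   a range using a dash
+ *   "53:53"    a range using a colon
+ *   "*"        all ports (1-65535)
+ */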
+
+/**
+ * Parse Service List
+ *
+ * @param out a pointer to store the link list
+ * @param service the service used to create the structure that will be linked.
+ */
+static void parse_service_list(void **out, char *service)
+{
+ ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
+ struct servent *serv = getservbyname((const char *)service, "tcp");
+ if (!serv)
+ serv = getservbyname((const char *)service, "udp");
+
+ if (!serv) {
+        info("Cannot resolve the service '%s' with the TCP and UDP protocols, it will be ignored", service);
+ return;
+ }
+
+ ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->value = strdupz(service);
+ w->hash = simple_hash(service);
+
+ w->first = w->last = (uint16_t)serv->s_port;
+
+ fill_port_list(list, w);
+}
+
+/**
+ * Netmask
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param prefix the CIDR prefix used to create the netmask.
+ *
+ * @return It returns the netmask for the given prefix.
+ */
+static inline in_addr_t netmask(int prefix) {
+
+ if (prefix == 0)
+ return (~((in_addr_t) - 1));
+ else
+ return (in_addr_t)(~((1 << (32 - prefix)) - 1));
+
+}
+
+/**
+ * Broadcast
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param addr is the ip address
+ * @param prefix is the CIDR value.
+ *
+ * @return It returns the last address of the range
+ */
+static inline in_addr_t broadcast(in_addr_t addr, int prefix)
+{
+ return (addr | ~netmask(prefix));
+}
+
+/**
+ * Network
+ *
+ * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
+ *
+ * @param addr is the ip address
+ * @param prefix is the CIDR value.
+ *
+ * @return It returns the first address of the range.
+ */
+static inline in_addr_t ipv4_network(in_addr_t addr, int prefix)
+{
+ return (addr & netmask(prefix));
+}
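+
+/*
+ * Illustrative example of the three helpers above for 192.168.1.10/24
+ * (values in host byte order, as the callers use after ntohl()):
+ *   netmask(24)            -> 255.255.255.0
+ *   ipv4_network(addr, 24) -> 192.168.1.0   (first address of the range)
+ *   broadcast(addr, 24)    -> 192.168.1.255 (last address of the range)
+ */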
+
+/**
+ * IP to network long
+ *
+ * @param dst the vector to store the result
+ * @param ip the source ip given by our users.
+ * @param domain the ip domain (IPV4 or IPV6)
+ * @param source the original string
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+static inline int ip2nl(uint8_t *dst, char *ip, int domain, char *source)
+{
+ if (inet_pton(domain, ip, dst) <= 0) {
+ error("The address specified (%s) is invalid ", source);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Get IPV6 Last Address
+ *
+ * @param out the address to store the last address.
+ * @param in the address used to do the math.
+ * @param prefix number of bits used to calculate the address
+ */
+static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
+{
+ uint64_t mask,tmp;
+ uint64_t ret[2];
+ memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
+
+ if (prefix == 128) {
+ memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
+ return;
+ } else if (!prefix) {
+ ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF;
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+ return;
+ } else if (prefix <= 64) {
+ ret[1] = 0xFFFFFFFFFFFFFFFFULL;
+
+ tmp = be64toh(ret[0]);
+ if (prefix > 0) {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
+ tmp |= ~mask;
+ }
+ ret[0] = htobe64(tmp);
+ } else {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
+ tmp = be64toh(ret[1]);
+ tmp |= ~mask;
+ ret[1] = htobe64(tmp);
+ }
+
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+}
+
+/**
+ * Calculate ipv6 first address
+ *
+ * @param out the address to store the first address.
+ * @param in the address used to do the math.
+ * @param prefix number of bits used to calculate the address
+ */
+static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
+{
+ uint64_t mask,tmp;
+ uint64_t ret[2];
+
+ memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
+
+ if (prefix == 128) {
+ memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
+ return;
+ } else if (!prefix) {
+ ret[0] = ret[1] = 0;
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+ return;
+ } else if (prefix <= 64) {
+ ret[1] = 0ULL;
+
+ tmp = be64toh(ret[0]);
+ if (prefix > 0) {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
+ tmp &= mask;
+ }
+ ret[0] = htobe64(tmp);
+ } else {
+ mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
+ tmp = be64toh(ret[1]);
+ tmp &= mask;
+ ret[1] = htobe64(tmp);
+ }
+
+ memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
+}
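+
+/*
+ * Illustrative example of the two helpers above for 2001:db8::/32:
+ *   get_ipv6_first_addr() -> 2001:db8::
+ *   get_ipv6_last_addr()  -> 2001:db8:ffff:ffff:ffff:ffff:ffff:ffff
+ */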
+
+/**
+ * Parse IP List
+ *
+ * Parse IP list and link it.
+ *
+ * @param out a pointer to store the link list
+ * @param ip the value given as parameter
+ */
+static void parse_ip_list(void **out, char *ip)
+{
+ ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;
+
+ char *ipdup = strdupz(ip);
+ union netdata_ip_t first = { };
+ union netdata_ip_t last = { };
+ char *is_ipv6;
+ if (*ip == '*' && *(ip+1) == '\0') {
+ memset(first.addr8, 0, sizeof(first.addr8));
+ memset(last.addr8, 0xFF, sizeof(last.addr8));
+
+ is_ipv6 = ip;
+
+ clean_ip_structure(list);
+ goto storethisip;
+ }
+
+ char *end = ip;
+    // Move forward until a separator is found
+ while (*end && *end != '/' && *end != '-') end++;
+
+    // We will use only the classic IPv6 notation for a while, but we could consider base 85 in the near future
+ // https://tools.ietf.org/html/rfc1924
+ is_ipv6 = strchr(ip, ':');
+
+ int select;
+ if (*end && !is_ipv6) { // IPV4 range
+ select = (*end == '/') ? 0 : 1;
+ *end++ = '\0';
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ if (!select) { // CIDR
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = (int) str2i(end);
+ if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) {
+ info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip);
+ goto cleanipdup;
+ }
+
+ last.addr32[0] = htonl(broadcast(ntohl(first.addr32[0]), select));
+ // This was added to remove
+ // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977
+ UNUSED(last.addr32[0]);
+
+ uint32_t ipv4_test = htonl(ipv4_network(ntohl(first.addr32[0]), select));
+ if (first.addr32[0] != ipv4_test) {
+ first.addr32[0] = ipv4_test;
+ struct in_addr ipv4_convert;
+ ipv4_convert.s_addr = ipv4_test;
+ char ipv4_msg[INET_ADDRSTRLEN];
+ if(inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN))
+                    info("The network value of CIDR %s was updated to %s.", ipdup, ipv4_msg);
+ }
+ } else { // Range
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = ip2nl(last.addr8, end, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+ }
+
+ if (htonl(first.addr32[0]) > htonl(last.addr32[0])) {
+            info("The specified range %s is invalid, the second address is smaller than the first, it will be ignored.",
+ ipdup);
+ goto cleanipdup;
+ }
+ } else if (is_ipv6) { // IPV6
+ if (!*end) { // Unique
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ memcpy(last.addr8, first.addr8, sizeof(first.addr8));
+ } else if (*end == '-') {
+ *end++ = 0x00;
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ select = ip2nl(last.addr8, end, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+ } else { // CIDR
+ *end++ = 0x00;
+ if (*end == '!') {
+ info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
+ goto cleanipdup;
+ }
+
+ select = str2i(end);
+ if (select < 0 || select > 128) {
+ info("The CIDR %s is not valid, the address %s will be ignored.", end, ip);
+ goto cleanipdup;
+ }
+
+ uint64_t prefix = (uint64_t)select;
+ select = ip2nl(first.addr8, ip, AF_INET6, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ get_ipv6_last_addr(&last, &first, prefix);
+
+ union netdata_ip_t ipv6_test;
+ get_ipv6_first_addr(&ipv6_test, &first, prefix);
+
+ if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) {
+ memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t));
+
+ struct in6_addr ipv6_convert;
+ memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t));
+
+ char ipv6_msg[INET6_ADDRSTRLEN];
+ if(inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN))
+                    info("The network value of CIDR %s was updated to %s.", ipdup, ipv6_msg);
+ }
+ }
+
+ if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) &&
+ !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) ||
+ (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) {
+            info("The specified range %s is invalid, the second address is smaller than the first, it will be ignored.",
+ ipdup);
+ goto cleanipdup;
+ }
+ } else { // Unique ip
+ select = ip2nl(first.addr8, ip, AF_INET, ipdup);
+ if (select)
+ goto cleanipdup;
+
+ memcpy(last.addr8, first.addr8, sizeof(first.addr8));
+ }
+
+ ebpf_network_viewer_ip_list_t *store;
+
+storethisip:
+ store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
+ store->value = ipdup;
+ store->hash = simple_hash(ipdup);
+    store->ver = (uint8_t)((!is_ipv6) ? AF_INET : AF_INET6);
+ memcpy(store->first.addr8, first.addr8, sizeof(first.addr8));
+ memcpy(store->last.addr8, last.addr8, sizeof(last.addr8));
+
+ fill_ip_list(list, store, "socket");
+ return;
+
+cleanipdup:
+ freez(ipdup);
+}
+
+/**
+ * Parse IP Range
+ *
+ * Parse the IP ranges given and create Network Viewer IP Structure
+ *
+ * @param ptr is a pointer with the text to parse.
+ */
+static void parse_ips(char *ptr)
+{
+ // No value
+ if (unlikely(!ptr))
+ return;
+
+ while (likely(ptr)) {
+ // Move forward until next valid character
+ while (isspace(*ptr)) ptr++;
+
+ // No valid value found
+ if (unlikely(!*ptr))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(ptr, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*ptr == '!') {
+ neg++;
+ ptr++;
+ }
+
+        if (isascii(*ptr)) { // Parse IP
+ parse_ip_list((!neg)?(void **)&network_viewer_opt.included_ips:(void **)&network_viewer_opt.excluded_ips,
+ ptr);
+ }
+
+ ptr = end;
+ }
+}
+
+
+/**
+ * Parse Port Range
+ *
+ * Parse the port ranges given and create Network Viewer Port Structure
+ *
+ * @param ptr is a pointer with the text to parse.
+ */
+static void parse_ports(char *ptr)
+{
+ // No value
+ if (unlikely(!ptr))
+ return;
+
+ while (likely(ptr)) {
+ // Move forward until next valid character
+ while (isspace(*ptr)) ptr++;
+
+ // No valid value found
+ if (unlikely(!*ptr))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(ptr, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*ptr == '!') {
+ neg++;
+ ptr++;
+ }
+
+ if (isdigit(*ptr)) { // Parse port
+ parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ } else if (isalpha(*ptr)) { // Parse service
+ parse_service_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ } else if (*ptr == '*') { // All
+ parse_port_list((!neg)?(void **)&network_viewer_opt.included_port:(void **)&network_viewer_opt.excluded_port,
+ ptr);
+ }
+
+ ptr = end;
+ }
+}
+
+/**
+ * Link hostname
+ *
+ * @param out is the output link list
+ * @param in the hostname to add to list.
+ */
+static void link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in)
+{
+ if (likely(*out)) {
+ ebpf_network_viewer_hostname_list_t *move = *out;
+ for (; move->next ; move = move->next ) {
+ if (move->hash == in->hash && !strcmp(move->value, in->value)) {
+ info("The hostname %s was already inserted, it will be ignored.", in->value);
+ freez(in->value);
+ simple_pattern_free(in->value_pattern);
+ freez(in);
+ return;
+ }
+ }
+
+ move->next = in;
+ } else {
+ *out = in;
+ }
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Adding value %s to %s hostname list used on network viewer",
+ in->value,
+ (*out == network_viewer_opt.included_hostnames)?"included":"excluded");
+#endif
+}
+
+/**
+ * Link Hostnames
+ *
+ * Parse the list of hostnames to create the link list.
+ * This is not associated with the IP, because simple patterns like *example* cannot be resolved to an IP address.
+ *
+ * @param parse is a pointer with the text to parse.
+ */
+static void link_hostnames(char *parse)
+{
+ // No value
+ if (unlikely(!parse))
+ return;
+
+ while (likely(parse)) {
+ // Find the first valid value
+ while (isspace(*parse)) parse++;
+
+ // No valid value found
+ if (unlikely(!*parse))
+ return;
+
+ // Find space that ends the list
+ char *end = strchr(parse, ' ');
+ if (end) {
+ *end++ = '\0';
+ }
+
+ int neg = 0;
+ if (*parse == '!') {
+ neg++;
+ parse++;
+ }
+
+ ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t));
+ hostname->value = strdupz(parse);
+ hostname->hash = simple_hash(parse);
+ hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT);
+
+ link_hostname((!neg)?&network_viewer_opt.included_hostnames:&network_viewer_opt.excluded_hostnames,
+ hostname);
+
+ parse = end;
+ }
+}
+
+/**
+ * Read max dimension.
+ *
+ * Netdata plots two dimensions per connection, so it is necessary to halve the configured value.
+ */
+static void read_max_dimension()
+{
+    int maxdim;
+    maxdim = (int) appconfig_get_number(&collector_config,
+                                        EBPF_NETWORK_VIEWER_SECTION,
+                                        "maximum dimensions",
+                                        NETDATA_NV_CAP_VALUE);
+    if (maxdim < 0) {
+        error("'maximum dimensions = %d' must be a positive number, Netdata will use the default value %ld instead.",
+              maxdim, NETDATA_NV_CAP_VALUE);
+        maxdim = NETDATA_NV_CAP_VALUE;
+    }
+
+    // Netdata plots two dimensions per connection, so the limit is halved here.
+    maxdim /= 2;
+    if (!maxdim) {
+        info("The number of dimensions is too small, we are setting it to the minimum of 2.");
+        maxdim = 1;
+    }
+
+    network_viewer_opt.max_dim = (uint32_t)maxdim;
+}
+
+/**
+ * Parse network viewer section
+ */
+static void parse_network_viewer_section()
+{
+ read_max_dimension();
+
+ network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(&collector_config,
+ EBPF_NETWORK_VIEWER_SECTION,
+ "resolve hostnames",
+ CONFIG_BOOLEAN_NO);
+
+ network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(&collector_config,
+ EBPF_NETWORK_VIEWER_SECTION,
+ "resolve service names",
+ CONFIG_BOOLEAN_NO);
+
+ char *value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION,
+ "ports", NULL);
+ parse_ports(value);
+
+ if (network_viewer_opt.hostname_resolution_enabled) {
+ value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION, "hostnames", NULL);
+ link_hostnames(value);
+ } else {
+        info("Name resolution is disabled, the collector will not parse the \"hostnames\" list.");
+ }
+
+ value = appconfig_get(&collector_config, EBPF_NETWORK_VIEWER_SECTION,
+ "ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128");
+ parse_ips(value);
+}
+
+/**
+ * Link dimension name
+ *
+ * Link user specified names inside a link list.
+ *
+ * @param port the port number associated to the dimension name.
+ * @param hash the calculated hash for the dimension name.
+ * @param value the dimension name.
+ */
+static void link_dimension_name(char *port, uint32_t hash, char *value)
+{
+ int test = str2i(port);
+ if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){
+ error("The dimension given (%s = %s) has an invalid value and it will be ignored.", port, value);
+ return;
+ }
+
+ ebpf_network_viewer_dim_name_t *w;
+ w = callocz(1, sizeof(ebpf_network_viewer_dim_name_t));
+
+ w->name = strdupz(value);
+ w->hash = hash;
+
+ w->port = (uint16_t) htons(test);
+
+ ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
+ if (unlikely(!names)) {
+ network_viewer_opt.names = w;
+ } else {
+ for (; names->next; names = names->next) {
+ if (names->port == w->port) {
+ info("Dupplicated definition for a service, the name %s will be ignored. ", names->name);
+ freez(names->name);
+ names->name = w->name;
+ names->hash = w->hash;
+ freez(w);
+ return;
+ }
+ }
+ names->next = w;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Adding values %s( %u) to dimension name list used on network viewer", w->name, htons(w->port));
+#endif
+}
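+
+/*
+ * Illustrative call, mirroring the stock [service name] section of ebpf.conf:
+ *
+ *     link_dimension_name("19999", simple_hash("19999"), "Netdata");
+ *
+ * The port is stored with htons(), presumably because the ports read from the
+ * kernel hash tables are in network byte order.
+ */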
+
+/**
+ * Parse service name section.
+ *
+ * This function gets the values that will be used to overwrite dimension names.
+ */
+static void parse_service_name_section()
+{
+ struct section *co = appconfig_get_section(&collector_config, EBPF_SERVICE_NAME_SECTION);
+ if (co) {
+ struct config_option *cv;
+ for (cv = co->values; cv ; cv = cv->next) {
+ link_dimension_name(cv->name, cv->hash, cv->value);
+ }
+ }
+
+ // Always associate the default port with Netdata
+ ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
+ if (names) {
+ uint16_t default_port = htons(19999);
+ while (names) {
+ if (names->port == default_port)
+ return;
+
+ names = names->next;
+ }
+ }
+
+ char *port_string = getenv("NETDATA_LISTEN_PORT");
+ if (port_string)
+ link_dimension_name(port_string, simple_hash(port_string), "Netdata");
+}
+
+/**
+ * Read collector values
+ *
+ * @param disable_apps variable used to store whether the integration with apps.plugin must be disabled.
+ */
+static void read_collector_values(int *disable_apps)
+{
+ // Read global section
+ char *value;
+ if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
+ value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load", "entry");
+ else
+ value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "ebpf load mode", "entry");
+
+ how_to_load(value);
+
+ // Kept for backward compatibility
+ uint32_t enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "disable apps",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled) {
+ // "apps" is phrased positively, so the value must be inverted to obtain the disable flag.
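+ // With the stock values ("disable apps = no", "apps = yes") the inversion
+ // below yields CONFIG_BOOLEAN_NO, so the integration stays active.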
+ enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "apps",
+ CONFIG_BOOLEAN_YES);
+ enabled = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
+ }
+ *disable_apps = (int)enabled;
+
+ // Read ebpf programs section
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
+ ebpf_modules[0].config_name, CONFIG_BOOLEAN_YES);
+ int started = 0;
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps);
+ started++;
+ }
+
+ // Kept for backward compatibility
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network viewer",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled)
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, ebpf_modules[1].config_name,
+ CONFIG_BOOLEAN_NO);
+
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps);
+ // Read network viewer section if network viewer is enabled
+ parse_network_viewer_section();
+ parse_service_name_section();
+ started++;
+ }
+
+ // Kept for backward compatibility
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connection monitoring",
+ CONFIG_BOOLEAN_NO);
+ if (!enabled)
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
+ CONFIG_BOOLEAN_NO);
+ ebpf_modules[1].optional = enabled;
+
+ if (!started){
+ ebpf_enable_all_charts(*disable_apps);
+ // Read network viewer section
+ parse_network_viewer_section();
+ parse_service_name_section();
+ }
+}
+
+/**
+ * Load collector config
+ *
+ * @param path the path where the file ebpf.conf is stored.
+ * @param disable_apps variable to store the information about apps plugin status.
+ *
+ * @return 0 on success and -1 otherwise.
+ */
+static int load_collector_config(char *path, int *disable_apps)
+{
+ char lpath[4096];
+
+ snprintf(lpath, 4095, "%s/%s", path, "ebpf.conf");
+
+ if (!appconfig_load(&collector_config, lpath, 0, NULL))
+ return -1;
+
+ read_collector_values(disable_apps);
+
+ return 0;
+}
+
+/**
+ * Set global variables reading environment variables
+ */
+void set_global_variables()
+{
+ // Get environment variables
+ ebpf_plugin_dir = getenv("NETDATA_PLUGINS_DIR");
+ if (!ebpf_plugin_dir)
+ ebpf_plugin_dir = PLUGINS_DIR;
+
+ ebpf_user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if (!ebpf_user_config_dir)
+ ebpf_user_config_dir = CONFIG_DIR;
+
+ ebpf_stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if (!ebpf_stock_config_dir)
+ ebpf_stock_config_dir = LIBCONFIG_DIR;
+
+ ebpf_configured_log_dir = getenv("NETDATA_LOG_DIR");
+ if (!ebpf_configured_log_dir)
+ ebpf_configured_log_dir = LOG_DIR;
+
+ ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+ if (ebpf_nprocs > NETDATA_MAX_PROCESSOR) {
+ ebpf_nprocs = NETDATA_MAX_PROCESSOR;
+ }
+
+ isrh = get_redhat_release();
+ pid_max = get_system_pid_max();
+}
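+
+/*
+ * Illustrative standalone run (the agent normally exports these variables
+ * before spawning the plugin; the paths below are hypothetical):
+ *
+ *     NETDATA_USER_CONFIG_DIR=/tmp/netdata NETDATA_LOG_DIR=/tmp ./ebpf.plugin 1
+ */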
+
+/**
+ * Parse the arguments given by the user.
+ *
+ * @param argc the number of arguments
+ * @param argv the pointer to the arguments
+ */
+static void parse_args(int argc, char **argv)
+{
+ int enabled = 0;
+ int disable_apps = 0;
+ int freq = 0;
+ int option_index = 0;
+ static struct option long_options[] = {
+ {"help", no_argument, 0, 'h' },
+ {"version", no_argument, 0, 'v' },
+ {"global", no_argument, 0, 'g' },
+ {"all", no_argument, 0, 'a' },
+ {"net", no_argument, 0, 'n' },
+ {"process", no_argument, 0, 'p' },
+ {"return", no_argument, 0, 'r' },
+ {0, 0, 0, 0}
+ };
+
+ memset(&network_viewer_opt, 0, sizeof(network_viewer_opt));
+ network_viewer_opt.max_dim = NETDATA_NV_CAP_VALUE;
+
+ if (argc > 1) {
+ int n = (int)str2l(argv[1]);
+ if (n > 0) {
+ freq = n;
+ }
+ }
+
+ while (1) {
+ int c = getopt_long(argc, argv, "hvganpr", long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 'h': {
+ ebpf_print_help();
+ exit(0);
+ }
+ case 'v': {
+ printf("ebpf.plugin %s\n", VERSION);
+ exit(0);
+ }
+ case 'g': {
+ disable_apps = 1;
+ ebpf_disable_apps();
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ "EBPF running with global chart group, because it was started with the option \"--global\" or \"-g\".");
+#endif
+ break;
+ }
+ case 'a': {
+ ebpf_enable_all_charts(disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all chart groups, because it was started with the option \"--all\" or \"-a\".");
+#endif
+ break;
+ }
+ case 'n': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"NET\" charts, because it was started with the option \"--net\" or \"-n\".");
+#endif
+ break;
+ }
+ case 'p': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ "EBPF enabling \"PROCESS\" charts, because it was started with the option \"--process\" or \"-p\".");
+#endif
+ break;
+ }
+ case 'r': {
+ ebpf_set_thread_mode(MODE_RETURN);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running in \"return\" mode, because it was started with the option \"--return\" or \"-r\".");
+#endif
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ if (freq > 0) {
+ update_every = freq;
+ }
+
+ if (load_collector_config(ebpf_user_config_dir, &disable_apps)) {
+ info(
+ "Does not have a configuration file inside `%s/ebpf.conf. It will try to load stock file.",
+ ebpf_user_config_dir);
+ if (load_collector_config(ebpf_stock_config_dir, &disable_apps)) {
+ info("Does not have a stock file. It is starting with default options.");
+ } else {
+ enabled = 1;
+ }
+ } else {
+ enabled = 1;
+ }
+
+ if (!enabled) {
+ ebpf_enable_all_charts(disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all charts, because neither \"-n\" or \"-p\" was given.");
+#endif
+ }
+
+ if (disable_apps)
+ return;
+
+ // Load apps_groups.conf
+ if (ebpf_read_apps_groups_conf(
+ &apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
+ info(
+ "Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
+ ebpf_user_config_dir, ebpf_stock_config_dir);
+ if (ebpf_read_apps_groups_conf(
+ &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
+ error(
+ "Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
+ ebpf_stock_config_dir);
+ thread_finished++;
+ ebpf_exit(1);
+ }
+ } else
+ info("Loaded config file '%s/apps_groups.conf'", ebpf_user_config_dir);
+}
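+
+/*
+ * Illustrative invocations (a leading numeric argument is taken as the update
+ * frequency in seconds, as parsed above):
+ *
+ *     ./ebpf.plugin 5 --all    # all chart groups, collected every 5 seconds
+ *     ./ebpf.plugin --net      # only the socket ("NET") charts
+ */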
+
+/*****************************************************************
+ *
+ * COLLECTOR ENTRY POINT
+ *
+ *****************************************************************/
+
+/**
+ * Entry point
+ *
+ * @param argc the number of arguments
+ * @param argv the pointer to the arguments
+ *
+ * @return it returns 0 on success and another integer otherwise
+ */
+int main(int argc, char **argv)
+{
+ set_global_variables();
+ parse_args(argc, argv);
+
+ running_on_kernel = get_kernel_version(kernel_string, 63);
+ if (!has_condition_to_run(running_on_kernel)) {
+ error("The current collector cannot run on this kernel.");
+ return 2;
+ }
+
+ if (!am_i_running_as_root()) {
+ error(
+ "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities..",
+ (unsigned int)getuid(), (unsigned int)geteuid());
+ return 3;
+ }
+
+ // set name
+ program_name = "ebpf.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
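+ // eBPF maps and programs are charged against RLIMIT_MEMLOCK on the kernels
+ // supported by this plugin, so the limit is lifted before loading anything.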
+ struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ error("Setrlimit(RLIMIT_MEMLOCK)");
+ return 4;
+ }
+
+ signal(SIGINT, ebpf_exit);
+ signal(SIGTERM, ebpf_exit);
+ signal(SIGPIPE, ebpf_exit);
+
+ if (ebpf_start_pthread_variables()) {
+ thread_finished++;
+ error("Cannot start mutex to control overall charts.");
+ ebpf_exit(5);
+ }
+
+ ebpf_allocate_common_vectors();
+
+ read_local_addresses();
+ read_local_ports("/proc/net/tcp", IPPROTO_TCP);
+ read_local_ports("/proc/net/tcp6", IPPROTO_TCP);
+ read_local_ports("/proc/net/udp", IPPROTO_UDP);
+ read_local_ports("/proc/net/udp6", IPPROTO_UDP);
+
+ struct netdata_static_thread ebpf_threads[] = {
+ {"EBPF PROCESS", NULL, NULL, 1, NULL, NULL, ebpf_modules[0].start_routine},
+ {"EBPF SOCKET" , NULL, NULL, 1, NULL, NULL, ebpf_modules[1].start_routine},
+ {NULL , NULL, NULL, 0, NULL, NULL, NULL}
+ };
+
+ //clean_loaded_events();
+
+ int i;
+ for (i = 0; ebpf_threads[i].name != NULL; i++) {
+ struct netdata_static_thread *st = &ebpf_threads[i];
+ st->thread = mallocz(sizeof(netdata_thread_t));
+
+ ebpf_module_t *em = &ebpf_modules[i];
+ em->thread_id = i;
+ netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em);
+ }
+
+ for (i = 0; ebpf_threads[i].name != NULL; i++) {
+ struct netdata_static_thread *st = &ebpf_threads[i];
+ netdata_thread_join(*st->thread, NULL);
+ }
+
+ thread_finished++;
+ ebpf_exit(0);
+
+ return 0;
+}
diff --git a/collectors/ebpf.plugin/ebpf.conf b/collectors/ebpf.plugin/ebpf.conf
new file mode 100644
index 000000000..3a5b77395
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.conf
@@ -0,0 +1,45 @@
+#
+# Global options
+#
+# The `ebpf load mode` option accepts the following values:
+# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# `no`.
+#
+[global]
+ ebpf load mode = entry
+ apps = yes
+
+#
+# eBPF Programs
+#
+# The eBPF collector enables and runs the following eBPF programs by default:
+#
+# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and
+# files removed.
+# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+# bandwidth consumed by each.
+[ebpf programs]
+ process = yes
+ socket = yes
+ network connections = no
+
+#
+# Network Connection
+#
+# This feature is a work in progress (WIP).
+#
+[network connections]
+ maximum dimensions = 50
+ resolve hostnames = no
+ resolve service names = no
+ ports = *
+ ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128
+ hostnames = *
+
+[service name]
+ 19999 = Netdata
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
new file mode 100644
index 000000000..1f5822951
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COLLECTOR_EBPF_H
+#define NETDATA_COLLECTOR_EBPF_H 1
+
+#ifndef __FreeBSD__
+#include <linux/perf_event.h>
+#endif
+#include <stdint.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <dlfcn.h>
+
+#include <fcntl.h>
+#include <ctype.h>
+#include <dirent.h>
+
+// From libnetdata.h
+#include "libnetdata/threads/threads.h"
+#include "libnetdata/locks/locks.h"
+#include "libnetdata/avl/avl.h"
+#include "libnetdata/clocks/clocks.h"
+#include "libnetdata/config/appconfig.h"
+#include "libnetdata/ebpf/ebpf.h"
+#include "libnetdata/procfile/procfile.h"
+#include "daemon/main.h"
+
+#include "ebpf_apps.h"
+
+typedef struct netdata_syscall_stat {
+ unsigned long bytes; // total number of bytes
+ uint64_t call; // total number of calls
+ uint64_t ecall; // number of calls that returned error
+ struct netdata_syscall_stat *next; // Link list
+} netdata_syscall_stat_t;
+
+typedef uint64_t netdata_idx_t;
+
+typedef struct netdata_publish_syscall {
+ char *dimension;
+ char *name;
+ unsigned long nbyte;
+ unsigned long pbyte;
+ uint64_t ncall;
+ uint64_t pcall;
+ uint64_t nerr;
+ uint64_t perr;
+ struct netdata_publish_syscall *next;
+} netdata_publish_syscall_t;
+
+typedef struct netdata_publish_vfs_common {
+ long write;
+ long read;
+
+ long running;
+ long zombie;
+} netdata_publish_vfs_common_t;
+
+typedef struct netdata_error_report {
+ char comm[16];
+ __u32 pid;
+
+ int type;
+ int err;
+} netdata_error_report_t;
+
+extern ebpf_module_t ebpf_modules[];
+#define EBPF_MODULE_PROCESS_IDX 0
+#define EBPF_MODULE_SOCKET_IDX 1
+
+// Copied from musl header
+#ifndef offsetof
+#if __GNUC__ > 3
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#else
+#define offsetof(type, member) ((size_t)((char *)&(((type *)0)->member) - (char *)0))
+#endif
+#endif
+
+// Chart definitions
+#define NETDATA_EBPF_FAMILY "ebpf"
+
+// Log file
+#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
+
+// Maximum number of processors monitored on perf events
+#define NETDATA_MAX_PROCESSOR 512
+
+// Kernel versions calculated with the formula:
+// R = MAJOR*65536 + MINOR*256 + PATCH
+#define NETDATA_KERNEL_V5_3 328448
+#define NETDATA_KERNEL_V4_15 265984
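+// For example, 5.3.0 gives 5*65536 + 3*256 + 0 = 328448 (NETDATA_KERNEL_V5_3)
+// and 4.15.0 gives 4*65536 + 15*256 + 0 = 265984 (NETDATA_KERNEL_V4_15).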
+
+#define EBPF_SYS_CLONE_IDX 11
+#define EBPF_MAX_MAPS 32
+
+// Threads
+extern void *ebpf_process_thread(void *ptr);
+extern void *ebpf_socket_thread(void *ptr);
+
+// Common variables
+extern pthread_mutex_t lock;
+extern int close_ebpf_plugin;
+extern int ebpf_nprocs;
+extern int running_on_kernel;
+extern char *ebpf_plugin_dir;
+extern char kernel_string[64];
+
+extern pthread_mutex_t collect_data_mutex;
+extern pthread_cond_t collect_data_cond_var;
+
+// Common functions
+extern void ebpf_global_labels(netdata_syscall_stat_t *is,
+ netdata_publish_syscall_t *pio,
+ char **dim,
+ char **name,
+ int end);
+
+extern void ebpf_write_chart_cmd(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ char *charttype,
+ int order);
+
+extern void ebpf_write_global_dimension(char *n, char *d);
+
+extern void ebpf_create_global_dimension(void *ptr, int end);
+
+extern void ebpf_create_chart(char *type,
+ char *id,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ void (*ncd)(void *, int),
+ void *move,
+ int end);
+
+extern void write_begin_chart(char *family, char *name);
+
+extern void write_chart_dimension(char *dim, long long value);
+
+extern void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end);
+
+extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
+
+extern void write_io_chart(char *chart, char *family, char *dwrite, char *dread, netdata_publish_vfs_common_t *pvc);
+
+extern void fill_ebpf_data(ebpf_data_t *ef);
+
+extern void ebpf_create_charts_on_apps(char *name,
+ char *title,
+ char *units,
+ char *family,
+ int order,
+ struct target *root);
+
+extern void write_end_chart();
+
+#define EBPF_GLOBAL_SECTION "global"
+#define EBPF_PROGRAMS_SECTION "ebpf programs"
+#define EBPF_NETWORK_VIEWER_SECTION "network connections"
+#define EBPF_SERVICE_NAME_SECTION "service name"
+
+#define EBPF_COMMON_DIMENSION_CALL "calls/s"
+#define EBPF_COMMON_DIMENSION_BYTESS "bytes/s"
+#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
+#define EBPF_COMMON_DIMENSION_PACKETS "packets"
+
+// Common variables
+extern char *ebpf_user_config_dir;
+extern char *ebpf_stock_config_dir;
+extern int debug_enabled;
+extern struct pid_stat *root_of_pids;
+
+// Socket functions and variables
+// Common functions
+extern void ebpf_socket_create_apps_charts(ebpf_module_t *em, struct target *root);
+extern collected_number get_value_from_structure(char *basis, size_t offset);
+extern struct pid_stat *root_of_pids;
+extern ebpf_process_stat_t *global_process_stat;
+extern size_t all_pids_count;
+extern int update_every;
+extern uint32_t finalized_threads;
+
+#define EBPF_MAX_SYNCHRONIZATION_TIME 300
+
+#endif /* NETDATA_COLLECTOR_EBPF_H */
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
new file mode 100644
index 000000000..062c9a4e4
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+#include "ebpf_apps.h"
+
+// ----------------------------------------------------------------------------
+// internal flags
+// handled in code (automatically set)
+
+static int proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/cmdline
+
+/*****************************************************************
+ *
+ * FUNCTIONS USED TO READ HASH TABLES
+ *
+ *****************************************************************/
+
+/**
+ * Read statistic hash table.
+ *
+ * @param ep the output structure.
+ * @param fd the file descriptor mapped from the kernel ring.
+ * @param pid the index used to select the data.
+ *
+ * @return It returns 0 when the data was copied and -1 otherwise.
+ */
+int ebpf_read_hash_table(void *ep, int fd, uint32_t pid)
+{
+ if (!ep)
+ return -1;
+
+ if (!bpf_map_lookup_elem(fd, &pid, ep))
+ return 0;
+
+ return -1;
+}
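+
+/*
+ * Note: bpf_map_lookup_elem() follows the kernel convention of returning 0 on
+ * success and a negative value on failure, which is why the result is
+ * inverted into the 0/-1 convention above.
+ */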
+
+/**
+ * Read bandwidth statistic
+ *
+ * Read information from the kernel ring to the user ring.
+ *
+ * @param ep the table with all process stats values.
+ * @param fd the file descriptor mapped from the kernel.
+ * @param pids the list of pids associated with a target.
+ *
+ * @return It returns the number of entries that were successfully read.
+ */
+size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids)
+{
+ size_t count = 0;
+ while (pids) {
+ uint32_t current_pid = pids->pid;
+ if (!ebpf_read_hash_table(ep[current_pid], fd, current_pid))
+ count++;
+
+ pids = pids->next;
+ }
+
+ return count;
+}
+
+/**
+ * Read bandwidth statistic using hash table
+ *
+ * @param out the output vector that will receive the information.
+ * @param fd the file descriptor that has the data.
+ *
+ * @return It returns the number of entries that were successfully read.
+ */
+size_t read_bandwidth_statistic_using_hash_table(ebpf_bandwidth_t **out, int fd)
+{
+ size_t count = 0;
+ uint32_t key = 0;
+ uint32_t next_key = 0;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ ebpf_bandwidth_t *eps = out[next_key];
+ if (!eps) {
+ eps = callocz(1, sizeof(ebpf_bandwidth_t));
+ out[next_key] = eps;
+ }
+ if (!ebpf_read_hash_table(eps, fd, next_key))
+ count++;
+ key = next_key;
+ }
+
+ return count;
+}
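+
+/*
+ * The loop above is the usual libbpf idiom for walking an entire map:
+ * bpf_map_get_next_key(fd, &key, &next_key) returns the key that follows
+ * `key`, so copying next_key into key on each pass is what advances the
+ * cursor and eventually terminates the walk.
+ */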
+
+/*****************************************************************
+ *
+ * FUNCTIONS CALLED FROM COLLECTORS
+ *
+ *****************************************************************/
+
+/**
+ * Am I running as Root
+ *
+ * Verify the user that is running the collector.
+ *
+ * @return It returns 1 for root and 0 otherwise.
+ */
+int am_i_running_as_root()
+{
+ uid_t uid = getuid(), euid = geteuid();
+
+ if (uid == 0 || euid == 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Reset the target values
+ *
+ * @param root the pointer to the chain that will be reset.
+ *
+ * @return it returns the number of structures that were reset.
+ */
+size_t zero_all_targets(struct target *root)
+{
+ struct target *w;
+ size_t count = 0;
+
+ for (w = root; w; w = w->next) {
+ count++;
+
+ if (unlikely(w->root_pid)) {
+ struct pid_on_target *pid_on_target = w->root_pid;
+
+ while (pid_on_target) {
+ struct pid_on_target *pid_on_target_to_free = pid_on_target;
+ pid_on_target = pid_on_target->next;
+ freez(pid_on_target_to_free);
+ }
+
+ w->root_pid = NULL;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * Clean the allocated structures
+ *
+ * @param agrt the pointer to be cleaned.
+ */
+void clean_apps_groups_target(struct target *agrt)
+{
+ struct target *current_target;
+ while (agrt) {
+ current_target = agrt;
+ agrt = current_target->target;
+
+ freez(current_target);
+ }
+}
+
+/**
+ * Find or create a new target.
+ * There are targets that are just aggregated to another target (the `target` argument).
+ *
+ * @param agrt the root of the apps groups target list.
+ * @param id the identifier read from the configuration file.
+ * @param target the target this entry aggregates to, or NULL for a top-level target.
+ * @param name the name of the target.
+ *
+ * @return It returns the target on success and NULL otherwise
+ */
+struct target *get_apps_groups_target(struct target **agrt, const char *id, struct target *target, const char *name)
+{
+ int tdebug = 0, thidden = target ? target->hidden : 0, ends_with = 0;
+ const char *nid = id;
+
+ // extract the options
+ while (nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
+ if (nid[0] == '-')
+ thidden = 1;
+ if (nid[0] == '+')
+ tdebug = 1;
+ if (nid[0] == '*')
+ ends_with = 1;
+ nid++;
+ }
+ uint32_t hash = simple_hash(id);
+
+ // find if it already exists
+ struct target *w, *last = *agrt;
+ for (w = *agrt; w; w = w->next) {
+ if (w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ return w;
+
+ last = w;
+ }
+
+ // find an existing target
+ if (unlikely(!target)) {
+ while (*name == '-') {
+ thidden = 1;
+ name++;
+ }
+
+ for (target = *agrt; target != NULL; target = target->next) {
+ if (!target->target && strcmp(name, target->name) == 0)
+ break;
+ }
+ }
+
+ if (target && target->target)
+ fatal(
+ "Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id,
+ target->id, target->target->id);
+
+ w = callocz(1, sizeof(struct target));
+ strncpyz(w->id, nid, MAX_NAME);
+ w->idhash = simple_hash(w->id);
+
+ if (unlikely(!target))
+ // copy the name
+ strncpyz(w->name, name, MAX_NAME);
+ else
+ // copy the id
+ strncpyz(w->name, nid, MAX_NAME);
+
+ strncpyz(w->compare, nid, MAX_COMPARE_NAME);
+ size_t len = strlen(w->compare);
+ if (w->compare[len - 1] == '*') {
+ w->compare[len - 1] = '\0';
+ w->starts_with = 1;
+ }
+ w->ends_with = ends_with;
+
+ if (w->starts_with && w->ends_with)
+ proc_pid_cmdline_is_needed = 1;
+
+ w->comparehash = simple_hash(w->compare);
+ w->comparelen = strlen(w->compare);
+
+ w->hidden = thidden;
+#ifdef NETDATA_INTERNAL_CHECKS
+ w->debug_enabled = tdebug;
+#else
+ if (tdebug)
+ fprintf(stderr, "apps.plugin has been compiled without debugging\n");
+#endif
+ w->target = target;
+
+ // append it, to maintain the order in apps_groups.conf
+ if (last)
+ last->next = w;
+ else
+ *agrt = w;
+
+ return w;
+}
+
+/**
+ * Read the apps_groups.conf file
+ *
+ * @param agdt a pointer to the default target.
+ * @param agrt a pointer to apps_groups_root_target.
+ * @param path the directory to search for apps_%s.conf.
+ * @param file the word that completes the file name.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const char *path, const char *file)
+{
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
+
+ // ----------------------------------------
+
+ procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return -1;
+
+ procfile_set_quotes(ff, "'\"");
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return -1;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for (line = 0; line < lines; line++) {
+ size_t word, words = procfile_linewords(ff, line);
+ if (!words)
+ continue;
+
+ char *name = procfile_lineword(ff, line, 0);
+ if (!name || !*name)
+ continue;
+
+ // find a possibly existing target
+ struct target *w = NULL;
+
+ // loop through all words, skipping the first one (the name)
+ for (word = 0; word < words; word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if (!s || !*s)
+ continue;
+ if (*s == '#')
+ break;
+
+ // is this the first word? skip it
+ if (s == name)
+ continue;
+
+ // add this target
+ struct target *n = get_apps_groups_target(agrt, s, w, name);
+ if (!n) {
+ error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
+ continue;
+ }
+
+ // just some optimization
+ // to avoid searching for a target for each process
+ if (!w)
+ w = n->target ? n->target : n;
+ }
+ }
+
+ procfile_close(ff);
+
+ *agdt = get_apps_groups_target(agrt, "p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
+ if (!*agdt)
+ fatal("Cannot create default target");
+
+ struct target *ptr = *agdt;
+ if (ptr->target)
+ *agdt = ptr->target;
+
+ return 0;
+}
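+
+/*
+ * Illustrative apps_groups.conf line (hypothetical group and process names):
+ *
+ *     database: mysqld* postgres*
+ *
+ * The first word is the target name; every following word becomes a pattern
+ * linked to that target by get_apps_groups_target().
+ */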
+
+// the minimum PID of the system
+// this is also the pid of the init process
+#define INIT_PID 1
+
+// ----------------------------------------------------------------------------
+// string lengths
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+#define MAX_CMDLINE 16384
+
+struct pid_stat **all_pids = NULL; // to avoid allocations, we pre-allocate
+ // the entire pid space.
+struct pid_stat *root_of_pids = NULL; // global list of all processes running
+
+size_t all_pids_count = 0; // the number of processes running
+
+struct target
+ *apps_groups_default_target = NULL, // the default target
+ *apps_groups_root_target = NULL, // apps_groups.conf defined
+ *users_root_target = NULL, // users
+ *groups_root_target = NULL; // user groups
+
+size_t apps_groups_targets_count = 0; // # of apps_groups.conf targets
+
+// ----------------------------------------------------------------------------
+// internal counters
+
+static size_t
+ // global_iterations_counter = 1,
+ calls_counter = 0,
+ // file_counter = 0,
+ // filenames_allocated_counter = 0,
+ // inodes_changed_counter = 0,
+ // links_changed_counter = 0,
+ targets_assignment_counter = 0;
+
+// ----------------------------------------------------------------------------
+// debugging
+
+// log each problem once per process
+// log flood protection flags (log_thrown)
+#define PID_LOG_IO 0x00000001
+#define PID_LOG_STATUS 0x00000002
+#define PID_LOG_CMDLINE 0x00000004
+#define PID_LOG_FDS 0x00000008
+#define PID_LOG_STAT 0x00000010
+
+int debug_enabled = 0;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define debug_log(fmt, args...) \
+ do { \
+ if (unlikely(debug_enabled)) \
+ debug_log_int(fmt, ##args); \
+ } while (0)
+
+#else
+
+static inline void debug_log_dummy(void)
+{
+}
+#define debug_log(fmt, args...) debug_log_dummy()
+
+#endif
+
+/**
+ * Managed log
+ *
+ * Store log information if it is necessary.
+ *
+ * @param p the pid stat structure
+ * @param log the log id
+ * @param status the return from a function.
+ *
+ * @return It returns the status value.
+ */
+static inline int managed_log(struct pid_stat *p, uint32_t log, int status)
+{
+ if (unlikely(!status)) {
+ // error("command failed log %u, errno %d", log, errno);
+
+ if (unlikely(debug_enabled || errno != ENOENT)) {
+ if (unlikely(debug_enabled || !(p->log_thrown & log))) {
+ p->log_thrown |= log;
+ switch (log) {
+ case PID_LOG_IO:
+ error(
+ "Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_STATUS:
+ error(
+ "Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_CMDLINE:
+ error(
+ "Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid,
+ p->comm);
+ break;
+
+ case PID_LOG_FDS:
+ error(
+ "Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix,
+ p->pid, p->comm);
+ break;
+
+ case PID_LOG_STAT:
+ break;
+
+ default:
+ error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
+ break;
+ }
+ }
+ }
+ errno = 0;
+ } else if (unlikely(p->log_thrown & log)) {
+ // error("unsetting log %u on pid %d", log, p->pid);
+ p->log_thrown &= ~log;
+ }
+
+ return status;
+}
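+
+/*
+ * The PID_LOG_* bits act as per-process log flood protection: the first
+ * failure sets the bit and logs once, subsequent failures stay silent until a
+ * success clears the bit again (the else branch above).
+ */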
+
+/**
+ * Get PID entry
+ *
+ * Get or allocate the PID entry for the specified pid.
+ *
+ * @param pid the pid to search the data.
+ *
+ * @return It returns the pid entry structure
+ */
+static inline struct pid_stat *get_pid_entry(pid_t pid)
+{
+ if (unlikely(all_pids[pid]))
+ return all_pids[pid];
+
+ struct pid_stat *p = callocz(1, sizeof(struct pid_stat));
+
+ if (likely(root_of_pids))
+ root_of_pids->prev = p;
+
+ p->next = root_of_pids;
+ root_of_pids = p;
+
+ p->pid = pid;
+
+ all_pids[pid] = p;
+ all_pids_count++;
+
+ return p;
+}
+
+/**
+ * Assign the PID to a target.
+ *
+ * @param p the pid_stat structure to assign for a target.
+ */
+static inline void assign_target_to_pid(struct pid_stat *p)
+{
+ targets_assignment_counter++;
+
+ uint32_t hash = simple_hash(p->comm);
+ size_t pclen = strlen(p->comm);
+
+ struct target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
+
+ // find it - 4 cases:
+ // 1. the target is not a pattern
+ // 2. the target has the prefix
+ // 3. the target has the suffix
+ // 4. the target is something inside cmdline
+
+ if (unlikely(
+ ((!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) ||
+ (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) ||
+ (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) ||
+ (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))))) {
+ if (w->target)
+ p->target = w->target;
+ else
+ p->target = w;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int("%s linked to target %s", p->comm, p->target->name);
+
+ break;
+ }
+ }
+}
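+
+/*
+ * Pattern semantics for the four cases above (illustrative names):
+ *
+ *     "sshd"    exact match on the process name
+ *     "sshd*"   prefix match (starts_with)
+ *     "*sshd"   suffix match (ends_with)
+ *     "*ssh*"   substring match against the full cmdline
+ */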
+
+// ----------------------------------------------------------------------------
+// update pids from proc
+
+/**
+ * Read cmd line from /proc/PID/cmdline
+ *
+ * @param p the pid_stat_structure.
+ *
+ * @return It returns 1 on success and 0 otherwise.
+ */
+static inline int read_proc_pid_cmdline(struct pid_stat *p)
+{
+ static char cmdline[MAX_CMDLINE + 1];
+
+ if (unlikely(!p->cmdline_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ p->cmdline_filename = strdupz(filename);
+ }
+
+ int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
+ if (unlikely(fd == -1))
+ goto cleanup;
+
+ ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
+ close(fd);
+
+ if (unlikely(bytes < 0))
+ goto cleanup;
+
+ cmdline[bytes] = '\0';
+ for (i = 0; i < bytes; i++) {
+ if (unlikely(!cmdline[i]))
+ cmdline[i] = ' ';
+ }
+
+ if (p->cmdline)
+ freez(p->cmdline);
+ p->cmdline = strdupz(cmdline);
+
+ debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
+
+ return 1;
+
+cleanup:
+ // copy the command to the command line
+ if (p->cmdline)
+ freez(p->cmdline);
+ p->cmdline = strdupz(p->comm);
+ return 0;
+}
+
+/**
+ * Read information from /proc/PID/stat and /proc/PID/cmdline
+ * Assign target to pid
+ *
+ * @param p the pid stat structure to store the data.
+ * @param ptr an unused argument.
+ *
+ * @return It returns 1 on success and 0 otherwise.
+ */
+static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr)
+{
+ UNUSED(ptr);
+
+ static procfile *ff = NULL;
+
+ if (unlikely(!p->stat_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
+ p->stat_filename = strdupz(filename);
+ }
+
+ int set_quotes = (!ff) ? 1 : 0;
+
+ struct stat statbuf;
+ if (stat(p->stat_filename, &statbuf))
+ return 0;
+
+ ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if (unlikely(!ff))
+ return 0;
+
+ if (unlikely(set_quotes))
+ procfile_set_open_close(ff, "(", ")");
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff))
+ return 0;
+
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->stat_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+ char *comm = procfile_lineword(ff, 0, 1);
+ p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
+
+ if (strcmp(p->comm, comm) != 0) {
+ if (unlikely(debug_enabled)) {
+ if (p->comm[0])
+ debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
+ else
+ debug_log("\tJust added %d (%s)", p->pid, comm);
+ }
+
+ strncpyz(p->comm, comm, MAX_COMPARE_NAME);
+
+ // /proc/<pid>/cmdline
+ if (likely(proc_pid_cmdline_is_needed))
+ managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+
+ assign_target_to_pid(p);
+ }
+
+ if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int(
+ "READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu)",
+ netdata_configured_host_prefix, p->pid, p->comm, (p->target) ? p->target->name : "UNSET",
+ p->stat_collected_usec - p->last_stat_collected_usec);
+
+ return 1;
+}
+
+/**
+ * Collect data for PID
+ *
+ * @param pid the current pid that we are working on
+ * @param ptr a NULL value
+ *
+ * @return It returns 1 on success and 0 otherwise
+ */
+static inline int collect_data_for_pid(pid_t pid, void *ptr)
+{
+ if (unlikely(pid < 0 || pid > pid_max)) {
+ error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
+ return 0;
+ }
+
+ struct pid_stat *p = get_pid_entry(pid);
+ if (unlikely(!p || p->read))
+ return 0;
+ p->read = 1;
+
+ if (unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
+ // there is no reason to proceed if we cannot get its status
+ return 0;
+
+ // check its parent pid
+ if (unlikely(p->ppid < 0 || p->ppid > pid_max)) {
+ error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
+ p->ppid = 0;
+ }
+
+ // mark it as updated
+ p->updated = 1;
+ p->keep = 0;
+ p->keeploops = 0;
+
+ return 1;
+}
+
+/**
+ * Fill link list of parents with children PIDs
+ */
+static inline void link_all_processes_to_their_parents(void)
+{
+ struct pid_stat *p, *pp;
+
+ // link all children to their parents
+ // and update children count on parents
+ for (p = root_of_pids; p; p = p->next) {
+ // for each process found
+
+ p->sortlist = 0;
+ p->parent = NULL;
+
+ if (unlikely(!p->ppid)) {
+ p->parent = NULL;
+ continue;
+ }
+
+ pp = all_pids[p->ppid];
+ if (likely(pp)) {
+ p->parent = pp;
+ pp->children_count++;
+
+ if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
+ debug_log_int(
+ "child %d (%s, %s) on target '%s' has parent %d (%s, %s).", p->pid, p->comm,
+ p->updated ? "running" : "exited", (p->target) ? p->target->name : "UNSET", pp->pid, pp->comm,
+ pp->updated ? "running" : "exited");
+ } else {
+ p->parent = NULL;
+ debug_log("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
+ }
+ }
+}
+
+/**
+ * Aggregate PIDs to targets.
+ */
+static void apply_apps_groups_targets_inheritance(void)
+{
+ struct pid_stat *p = NULL;
+
+ // children that do not have a target
+ // inherit their target from their parent
+ int found = 1, loops = 0;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+ for (p = root_of_pids; p; p = p->next) {
+ // if this process does not have a target
+ // and it has a parent
+ // and its parent has a target
+ // then, set the parent's target to this process
+ if (unlikely(!p->target && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name,
+ p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ // find all the procs with zero children and merge them into their parents
+ // repeat, until nothing more can be done.
+ int sortlist = 1;
+ found = 1;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+
+ for (p = root_of_pids; p; p = p->next) {
+ if (unlikely(!p->sortlist && !p->children_count))
+ p->sortlist = sortlist++;
+
+ if (unlikely(
+ !p->children_count // if this process does not have any children
+ && !p->merged // and is not already merged
+ && p->parent // and has a parent
+ && p->parent->children_count // and its parent has children
+ // and the target of this process and its parent is the same,
+ // or the parent does not have a target
+ && (p->target == p->parent->target || !p->parent->target) &&
+ p->ppid != INIT_PID // and its parent is not init
+ )) {
+ // mark it as merged
+ p->parent->children_count--;
+ p->merged = 1;
+
+ // the parent inherits the child's target, if it does not have a target itself
+ if (unlikely(p->target && !p->parent->target)) {
+ p->parent->target = p->target;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name,
+ p->parent->pid, p->parent->comm, p->pid, p->comm);
+ }
+
+ found++;
+ }
+ }
+
+ debug_log("TARGET INHERITANCE: merged %d processes", found);
+ }
+
+ // init goes always to default target
+ if (all_pids[INIT_PID])
+ all_pids[INIT_PID]->target = apps_groups_default_target;
+
+ // pid 0 goes always to default target
+ if (all_pids[0])
+ all_pids[0]->target = apps_groups_default_target;
+
+ // give a default target on all top level processes
+ if (unlikely(debug_enabled))
+ loops++;
+ for (p = root_of_pids; p; p = p->next) {
+ // if the process is not merged itself
+ // then it is a top level process
+ if (unlikely(!p->merged && !p->target))
+ p->target = apps_groups_default_target;
+
+ // make sure all processes have a sortlist
+ if (unlikely(!p->sortlist))
+ p->sortlist = sortlist++;
+ }
+
+ if (all_pids[1])
+ all_pids[1]->sortlist = sortlist++;
+
+ // give a target to all merged child processes
+ found = 1;
+ while (found) {
+ if (unlikely(debug_enabled))
+ loops++;
+ found = 0;
+ for (p = root_of_pids; p; p = p->next) {
+ if (unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
+ p->target = p->parent->target;
+ found++;
+
+ if (debug_enabled || (p->target && p->target->debug_enabled))
+ debug_log_int(
+ "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.",
+ p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
+ }
+ }
+ }
+
+ debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
+}
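+
+/*
+ * Illustrative walk (hypothetical processes): if bash is matched to a target
+ * "shell" and spawns a sleep that matches nothing, the first loop makes sleep
+ * inherit "shell" from bash; the later loops then merge childless processes
+ * such as sleep back into their parents when their targets agree.
+ */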
+
+/**
+ * Update target timestamp.
+ *
+ * @param root the targets that will be updated.
+ */
+static inline void post_aggregate_targets(struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (w->collected_starttime) {
+ if (!w->starttime || w->collected_starttime < w->starttime) {
+ w->starttime = w->collected_starttime;
+ }
+ } else {
+ w->starttime = 0;
+ }
+ }
+}
+
+/**
+ * Remove PID from the link list.
+ *
+ * @param pid the PID that will be removed.
+ */
+static inline void del_pid_entry(pid_t pid)
+{
+ struct pid_stat *p = all_pids[pid];
+
+ if (unlikely(!p)) {
+ error("attempted to free pid %d that is not allocated.", pid);
+ return;
+ }
+
+ debug_log("process %d %s exited, deleting it.", pid, p->comm);
+
+ if (root_of_pids == p)
+ root_of_pids = p->next;
+
+ if (p->next)
+ p->next->prev = p->prev;
+ if (p->prev)
+ p->prev->next = p->next;
+
+ freez(p->stat_filename);
+ freez(p->status_filename);
+ freez(p->io_filename);
+ freez(p->cmdline_filename);
+ freez(p->cmdline);
+ freez(p);
+
+ all_pids[pid] = NULL;
+ all_pids_count--;
+}
+
+/**
+ * Remove PIDs that are not running anymore.
+ */
+void cleanup_exited_pids()
+{
+ struct pid_stat *p = NULL;
+
+ for (p = root_of_pids; p;) {
+ if (!p->updated && (!p->keep || p->keeploops > 0)) {
+ if (unlikely(debug_enabled && (p->keep || p->keeploops)))
+ debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
+
+ pid_t r = p->pid;
+ p = p->next;
+ del_pid_entry(r);
+
+ // Clean process structure
+ freez(global_process_stats[r]);
+ global_process_stats[r] = NULL;
+
+ freez(current_apps_data[r]);
+ current_apps_data[r] = NULL;
+ prev_apps_data[r] = NULL;
+
+ // Clean socket structures
+ if (socket_bandwidth_curr) {
+ freez(socket_bandwidth_curr[r]);
+ socket_bandwidth_curr[r] = NULL;
+ socket_bandwidth_prev[r] = NULL;
+ }
+ } else {
+ if (unlikely(p->keep))
+ p->keeploops++;
+ p->keep = 0;
+ p = p->next;
+ }
+ }
+}
+
+/**
+ * Read the proc filesystem to update the list of running processes.
+ */
+static inline void read_proc_filesystem()
+{
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ DIR *dir = opendir(dirname);
+ if (!dir)
+ return;
+
+ struct dirent *de = NULL;
+
+ while ((de = readdir(dir))) {
+ char *endptr = de->d_name;
+
+ if (unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ pid_t pid = (pid_t)strtoul(de->d_name, &endptr, 10);
+
+ // make sure we read a valid number
+ if (unlikely(endptr == de->d_name || *endptr != '\0'))
+ continue;
+
+ collect_data_for_pid(pid, NULL);
+ }
+ closedir(dir);
+}
+
+/**
+ * Aggregate PID on target
+ *
+ * @param w the target output
+ * @param p the pid with information to update
+ * @param o never used
+ */
+static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o)
+{
+ UNUSED(o);
+
+ if (unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ if (unlikely(!w)) {
+ error("pid %d %s was left without a target!", p->pid, p->comm);
+ return;
+ }
+
+ w->processes++;
+ struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
+ pid_on_target->pid = p->pid;
+ pid_on_target->next = w->root_pid;
+ w->root_pid = pid_on_target;
+}
+
+/**
+ * Collect data for all processes
+ *
+ * Read data from hash table and store it in appropriate vectors.
+ * It also creates the link between targets and PIDs.
+ *
+ * @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
+ */
+void collect_data_for_all_processes(int tbl_pid_stats_fd)
+{
+ struct pid_stat *pids = root_of_pids; // global list of all processes running
+ while (pids) {
+ if (pids->updated_twice) {
+ pids->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
+ pids->updated = 0;
+ pids->merged = 0;
+ pids->children_count = 0;
+ pids->parent = NULL;
+ } else {
+ if (pids->updated)
+ pids->updated_twice = 1;
+ }
+
+ pids = pids->next;
+ }
+
+ read_proc_filesystem();
+
+ uint32_t key;
+ pids = root_of_pids; // global list of all processes running
+ // while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
+ while (pids) {
+ key = pids->pid;
+ ebpf_process_stat_t *w = global_process_stats[key];
+ if (!w) {
+ w = mallocz(sizeof(ebpf_process_stat_t));
+ global_process_stats[key] = w;
+ }
+
+ if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, w)) {
+ // Clean Process structures
+ freez(w);
+ global_process_stats[key] = NULL;
+
+ freez(current_apps_data[key]);
+ current_apps_data[key] = NULL;
+ prev_apps_data[key] = NULL;
+
+ // Clean socket structures
+ if (socket_bandwidth_curr) {
+ freez(socket_bandwidth_curr[key]);
+ socket_bandwidth_curr[key] = NULL;
+ socket_bandwidth_prev[key] = NULL;
+ }
+
+ pids = pids->next;
+ continue;
+ }
+
+ pids = pids->next;
+ }
+
+ link_all_processes_to_their_parents();
+
+ apply_apps_groups_targets_inheritance();
+
+ apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
+
+ // this has to be done before the cleanup
+ // concentrate everything on the targets
+ for (pids = root_of_pids; pids; pids = pids->next)
+ aggregate_pid_on_target(pids->target, pids, NULL);
+
+ post_aggregate_targets(apps_groups_root_target);
+}
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
new file mode 100644
index 000000000..46d36966e
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_APPS_H
+#define NETDATA_EBPF_APPS_H 1
+
+#include "libnetdata/threads/threads.h"
+#include "libnetdata/locks/locks.h"
+#include "libnetdata/avl/avl.h"
+#include "libnetdata/clocks/clocks.h"
+#include "libnetdata/config/appconfig.h"
+#include "libnetdata/ebpf/ebpf.h"
+
+#define NETDATA_APPS_FAMILY "apps"
+#define NETDATA_APPS_FILE_GROUP "ebpf file"
+#define NETDATA_APPS_VFS_GROUP "ebpf vfs"
+#define NETDATA_APPS_PROCESS_GROUP "ebpf process"
+#define NETDATA_APPS_NET_GROUP "ebpf net"
+
+#include "ebpf_process.h"
+
+#define MAX_COMPARE_NAME 100
+#define MAX_NAME 100
+
+// ----------------------------------------------------------------------------
+// process_pid_stat
+//
+// Fields read from the kernel ring for a specific PID
+//
+typedef struct process_pid_stat {
+ uint64_t pid_tgid; // Unique identifier
+ uint32_t pid; // process id
+
+ // Count number of calls done for specific function
+ uint32_t open_call;
+ uint32_t write_call;
+ uint32_t writev_call;
+ uint32_t read_call;
+ uint32_t readv_call;
+ uint32_t unlink_call;
+ uint32_t exit_call;
+ uint32_t release_call;
+ uint32_t fork_call;
+ uint32_t clone_call;
+ uint32_t close_call;
+
+ // Count number of bytes written or read
+ uint64_t write_bytes;
+ uint64_t writev_bytes;
+ uint64_t readv_bytes;
+ uint64_t read_bytes;
+
+ // Count number of errors for the specified function
+ uint32_t open_err;
+ uint32_t write_err;
+ uint32_t writev_err;
+ uint32_t read_err;
+ uint32_t readv_err;
+ uint32_t unlink_err;
+ uint32_t fork_err;
+ uint32_t clone_err;
+ uint32_t close_err;
+} process_pid_stat_t;
+
+// ----------------------------------------------------------------------------
+// socket_bandwidth
+//
+// Fields read from the kernel ring for a specific PID
+//
+typedef struct socket_bandwidth {
+ uint64_t first;
+ uint64_t ct;
+ uint64_t sent;
+ uint64_t received;
+ unsigned char removed;
+} socket_bandwidth_t;
+
+// ----------------------------------------------------------------------------
+// pid_stat
+//
+// structure to store data for each process running
+// see: man proc for the description of the fields
+
+struct pid_fd {
+ int fd;
+
+#ifndef __FreeBSD__
+ ino_t inode;
+ char *filename;
+ uint32_t link_hash;
+ size_t cache_iterations_counter;
+ size_t cache_iterations_reset;
+#endif
+};
+
+struct target {
+ char compare[MAX_COMPARE_NAME + 1];
+ uint32_t comparehash;
+ size_t comparelen;
+
+ char id[MAX_NAME + 1];
+ uint32_t idhash;
+
+ char name[MAX_NAME + 1];
+
+ uid_t uid;
+ gid_t gid;
+
+ /* These variables are not necessary for eBPF collector
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+ kernel_uint_t num_threads;
+ // kernel_uint_t rss;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+
+ int *target_fds;
+ int target_fds_size;
+
+ kernel_uint_t openfiles;
+ kernel_uint_t openpipes;
+ kernel_uint_t opensockets;
+ kernel_uint_t openinotifies;
+ kernel_uint_t openeventfds;
+ kernel_uint_t opentimerfds;
+ kernel_uint_t opensignalfds;
+ kernel_uint_t openeventpolls;
+ kernel_uint_t openother;
+ */
+
+ kernel_uint_t starttime;
+ kernel_uint_t collected_starttime;
+
+ /*
+ kernel_uint_t uptime_min;
+ kernel_uint_t uptime_sum;
+ kernel_uint_t uptime_max;
+ */
+
+ unsigned int processes; // how many processes have been merged to this
+ int exposed; // if set, we have sent this to netdata
+ int hidden; // if set, we set the hidden flag on the dimension
+ int debug_enabled;
+ int ends_with;
+ int starts_with; // if set, the compare string matches only the
+ // beginning of the command
+
+ struct pid_on_target *root_pid; // list of aggregated pids for target debugging
+
+ struct target *target; // the one that will be reported to netdata
+ struct target *next;
+};
+
+extern struct target *apps_groups_default_target;
+extern struct target *apps_groups_root_target;
+extern struct target *users_root_target;
+extern struct target *groups_root_target;
+
+struct pid_stat {
+ int32_t pid;
+ char comm[MAX_COMPARE_NAME + 1];
+ char *cmdline;
+
+ uint32_t log_thrown;
+
+ // char state;
+ int32_t ppid;
+
+ // int32_t pgrp;
+ // int32_t session;
+ // int32_t tty_nr;
+ // int32_t tpgid;
+ // uint64_t flags;
+
+ /*
+ // these are raw values collected
+ kernel_uint_t minflt_raw;
+ kernel_uint_t cminflt_raw;
+ kernel_uint_t majflt_raw;
+ kernel_uint_t cmajflt_raw;
+ kernel_uint_t utime_raw;
+ kernel_uint_t stime_raw;
+ kernel_uint_t gtime_raw; // guest_time
+ kernel_uint_t cutime_raw;
+ kernel_uint_t cstime_raw;
+ kernel_uint_t cgtime_raw; // cguest_time
+
+ // these are rates
+ kernel_uint_t minflt;
+ kernel_uint_t cminflt;
+ kernel_uint_t majflt;
+ kernel_uint_t cmajflt;
+ kernel_uint_t utime;
+ kernel_uint_t stime;
+ kernel_uint_t gtime;
+ kernel_uint_t cutime;
+ kernel_uint_t cstime;
+ kernel_uint_t cgtime;
+
+ // int64_t priority;
+ // int64_t nice;
+ int32_t num_threads;
+ // int64_t itrealvalue;
+ kernel_uint_t collected_starttime;
+ // kernel_uint_t vsize;
+ // kernel_uint_t rss;
+ // kernel_uint_t rsslim;
+ // kernel_uint_t starcode;
+ // kernel_uint_t endcode;
+ // kernel_uint_t startstack;
+ // kernel_uint_t kstkesp;
+ // kernel_uint_t kstkeip;
+ // uint64_t signal;
+ // uint64_t blocked;
+ // uint64_t sigignore;
+ // uint64_t sigcatch;
+ // uint64_t wchan;
+ // uint64_t nswap;
+ // uint64_t cnswap;
+ // int32_t exit_signal;
+ // int32_t processor;
+ // uint32_t rt_priority;
+ // uint32_t policy;
+ // kernel_uint_t delayacct_blkio_ticks;
+
+ uid_t uid;
+ gid_t gid;
+
+ kernel_uint_t status_vmsize;
+ kernel_uint_t status_vmrss;
+ kernel_uint_t status_vmshared;
+ kernel_uint_t status_rssfile;
+ kernel_uint_t status_rssshmem;
+ kernel_uint_t status_vmswap;
+#ifndef __FreeBSD__
+ ARL_BASE *status_arl;
+#endif
+
+ kernel_uint_t io_logical_bytes_read_raw;
+ kernel_uint_t io_logical_bytes_written_raw;
+ // kernel_uint_t io_read_calls_raw;
+ // kernel_uint_t io_write_calls_raw;
+ kernel_uint_t io_storage_bytes_read_raw;
+ kernel_uint_t io_storage_bytes_written_raw;
+ // kernel_uint_t io_cancelled_write_bytes_raw;
+
+ kernel_uint_t io_logical_bytes_read;
+ kernel_uint_t io_logical_bytes_written;
+ // kernel_uint_t io_read_calls;
+ // kernel_uint_t io_write_calls;
+ kernel_uint_t io_storage_bytes_read;
+ kernel_uint_t io_storage_bytes_written;
+ // kernel_uint_t io_cancelled_write_bytes;
+ */
+
+ struct pid_fd *fds; // array of fds it uses
+ size_t fds_size; // the size of the fds array
+
+ int children_count; // number of processes directly referencing this
+ unsigned char keep : 1; // 1 when we need to keep this process in memory even after it exited
+ int keeploops; // increases by 1 every time keep is 1 and updated 0
+ unsigned char updated : 1; // 1 when the process is currently running
+ unsigned char updated_twice : 1; // 1 when the process was running in the previous iteration
+ unsigned char merged : 1; // 1 when it has been merged to its parent
+ unsigned char read : 1; // 1 when we have already read this process for this iteration
+
+ int sortlist; // higher numbers = top on the process tree
+
+ // each process gets a unique number
+
+ struct target *target; // app_groups.conf targets
+ struct target *user_target; // uid based targets
+ struct target *group_target; // gid based targets
+
+ usec_t stat_collected_usec;
+ usec_t last_stat_collected_usec;
+
+ usec_t io_collected_usec;
+ usec_t last_io_collected_usec;
+
+ kernel_uint_t uptime;
+
+ char *fds_dirname; // the full directory name in /proc/PID/fd
+
+ char *stat_filename;
+ char *status_filename;
+ char *io_filename;
+ char *cmdline_filename;
+
+ struct pid_stat *parent;
+ struct pid_stat *prev;
+ struct pid_stat *next;
+};
+
+// ----------------------------------------------------------------------------
+// target
+//
+// target is the structure that processes are aggregated to be reported
+// to netdata.
+//
+// - Each entry in /etc/apps_groups.conf creates a target.
+// - Each user and group used by a process in the system, creates a target.
+struct pid_on_target {
+ int32_t pid;
+ struct pid_on_target *next;
+};
+
+// ----------------------------------------------------------------------------
+// Structures used to read information from kernel ring
+typedef struct ebpf_process_stat {
+ uint64_t pid_tgid;
+ uint32_t pid;
+
+ //Counter
+ uint32_t open_call;
+ uint32_t write_call;
+ uint32_t writev_call;
+ uint32_t read_call;
+ uint32_t readv_call;
+ uint32_t unlink_call;
+ uint32_t exit_call;
+ uint32_t release_call;
+ uint32_t fork_call;
+ uint32_t clone_call;
+ uint32_t close_call;
+
+ //Accumulator
+ uint64_t write_bytes;
+ uint64_t writev_bytes;
+ uint64_t readv_bytes;
+ uint64_t read_bytes;
+
+ //Counter
+ uint32_t open_err;
+ uint32_t write_err;
+ uint32_t writev_err;
+ uint32_t read_err;
+ uint32_t readv_err;
+ uint32_t unlink_err;
+ uint32_t fork_err;
+ uint32_t clone_err;
+ uint32_t close_err;
+
+ uint8_t removeme;
+} ebpf_process_stat_t;
+
+typedef struct ebpf_bandwidth {
+ uint32_t pid;
+
+ uint64_t first; // First timestamp
+ uint64_t ct; // Last timestamp
+ uint64_t bytes_sent; // Bytes sent
+ uint64_t bytes_received; // Bytes received
+ uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called
+ uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called
+ uint64_t retransmit; // Number of times tcp_retransmit was called
+ uint64_t call_udp_sent; // Number of times udp_sendmsg was called
+ uint64_t call_udp_received; // Number of times udp_recvmsg was called
+} ebpf_bandwidth_t;
+
+/**
+ * Internal function used to write debug messages.
+ *
+ * @param fmt the format to create the message.
+ * @param ... the arguments to fill the format.
+ */
+static inline void debug_log_int(const char *fmt, ...)
+{
+ va_list args;
+
+ fprintf(stderr, "apps.plugin: ");
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ fputc('\n', stderr);
+}
+
+// ----------------------------------------------------------------------------
+// Exported variables and functions
+//
+extern struct pid_stat **all_pids;
+
+extern int ebpf_read_apps_groups_conf(struct target **apps_groups_default_target,
+ struct target **apps_groups_root_target,
+ const char *path,
+ const char *file);
+
+extern void clean_apps_groups_target(struct target *apps_groups_root_target);
+
+extern size_t zero_all_targets(struct target *root);
+
+extern int am_i_running_as_root();
+
+extern void cleanup_exited_pids();
+
+extern int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
+
+extern size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep,
+ int fd,
+ struct pid_on_target *pids);
+
+extern size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids);
+
+extern void collect_data_for_all_processes(int tbl_pid_stats_fd);
+
+extern ebpf_process_stat_t **global_process_stats;
+extern ebpf_process_publish_apps_t **current_apps_data;
+extern ebpf_process_publish_apps_t **prev_apps_data;
+
+#endif /* NETDATA_EBPF_APPS_H */
diff --git a/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt b/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt
new file mode 100644
index 000000000..d56b216a9
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_kernel_reject_list.txt
@@ -0,0 +1 @@
+Ubuntu 4.18.0-13.
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
new file mode 100644
index 000000000..9a1d69c06
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_process.h"
+
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static char *process_dimension_names[NETDATA_MAX_MONITOR_VECTOR] = { "open", "close", "delete", "read", "write",
+ "process", "task", "process", "thread" };
+static char *process_id_names[NETDATA_MAX_MONITOR_VECTOR] = { "do_sys_open", "__close_fd", "vfs_unlink",
+ "vfs_read", "vfs_write", "do_exit",
+ "release_task", "_do_fork", "sys_clone" };
+static char *status[] = { "process", "zombie" };
+
+static netdata_idx_t *process_hash_values = NULL;
+static netdata_syscall_stat_t *process_aggregated_data = NULL;
+static netdata_publish_syscall_t *process_publish_aggregated = NULL;
+
+static ebpf_data_t process_data;
+
+ebpf_process_stat_t **global_process_stats = NULL;
+ebpf_process_publish_apps_t **current_apps_data = NULL;
+ebpf_process_publish_apps_t **prev_apps_data = NULL;
+
+int process_enabled = 0;
+
+static int *map_fd = NULL;
+static struct bpf_object *objects = NULL;
+static struct bpf_link **probe_links = NULL;
+
+/*****************************************************************
+ *
+ * PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish the first output structure with independent dimensions
+ * @param pvc the second output structure with correlated dimensions
+ * @param input the structure with the input data.
+ */
+static void ebpf_update_global_publish(
+ netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input)
+{
+ netdata_publish_syscall_t *move = publish;
+ while (move) {
+ if (input->call != move->pcall) {
+ // This condition avoids publishing artificially large deltas on the first reading.
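+ // Example: if the previous reading stored pcall = 100 and the kernel now
+ // reports input->call = 130, ncall becomes 30; on the very first reading
+ // pcall is still zero, so the deltas are forced to zero instead.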
+ if (move->pcall) {
+ move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
+ move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
+ move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ move->pcall = input->call;
+ move->pbyte = input->bytes;
+ move->perr = input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ input = input->next;
+ move = move->next;
+ }
+
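+ // Written bytes are published as a negative value, so reads and writes can
+ // share a single chart mirrored around zero (see write_io_chart below).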
+ pvc->write = -((long)publish[2].nbyte);
+ pvc->read = (long)publish[3].nbyte;
+
+ pvc->running = (long)publish[7].ncall - (long)publish[8].ncall;
+ publish[6].ncall = -publish[6].ncall; // release
+ pvc->zombie = (long)publish[5].ncall + (long)publish[6].ncall;
+}
+
+/**
+ * Update apps dimension to publish.
+ *
+ * @param curr Last values read from memory.
+ * @param prev Previous values read from memory.
+ * @param first 1 when the structure was allocated in this iteration, 0 otherwise
+ */
+static void
+ebpf_process_update_apps_publish(ebpf_process_publish_apps_t *curr, ebpf_process_publish_apps_t *prev, int first)
+{
+ if (first)
+ return;
+
+ curr->publish_open = curr->call_sys_open - prev->call_sys_open;
+ curr->publish_closed = curr->call_close_fd - prev->call_close_fd;
+ curr->publish_deleted = curr->call_vfs_unlink - prev->call_vfs_unlink;
+ curr->publish_write_call = curr->call_write - prev->call_write;
+ curr->publish_write_bytes = curr->bytes_written - prev->bytes_written;
+ curr->publish_read_call = curr->call_read - prev->call_read;
+ curr->publish_read_bytes = curr->bytes_read - prev->bytes_read;
+ curr->publish_process = curr->call_do_fork - prev->call_do_fork;
+ curr->publish_thread = curr->call_sys_clone - prev->call_sys_clone;
+ curr->publish_task = curr->call_release_task - prev->call_release_task;
+ curr->publish_open_error = curr->ecall_sys_open - prev->ecall_sys_open;
+ curr->publish_close_error = curr->ecall_close_fd - prev->ecall_close_fd;
+ curr->publish_write_error = curr->ecall_write - prev->ecall_write;
+ curr->publish_read_error = curr->ecall_read - prev->ecall_read;
+}
+
+/**
+ * Call the necessary functions to create a chart.
+ *
+ * @param family the chart family
+ * @param move the pointer with the values that will be published
+ */
+static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc)
+{
+ write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME);
+
+ write_chart_dimension(status[0], (long long)pvc->running);
+ write_chart_dimension(status[1], (long long)pvc->zombie);
+
+ write_end_chart();
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_process_send_data(ebpf_module_t *em)
+{
+ netdata_publish_vfs_common_t pvc;
+ ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
+
+ write_count_chart(
+ NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+
+ write_count_chart(
+ NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1);
+
+ write_count_chart(
+ NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2);
+
+ write_count_chart(
+ NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2);
+ write_count_chart(
+ NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+
+ write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+ write_err_chart(
+ NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS);
+ write_err_chart(
+ NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ }
+
+ write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY, process_id_names[3], process_id_names[4], &pvc);
+}
+
+/**
+ * Sum values for PID
+ *
+ * @param root the structure with all available PIDs
+ * @param offset the offset, inside the publish structure, of the member to sum
+ *
+ * @return It returns the sum of the values for all PIDs
+ */
+long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+{
+ long long ret = 0;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_process_publish_apps_t *w = current_apps_data[pid];
+ if (w) {
+ ret += get_value_from_structure((char *)w, offset);
+ }
+
+ root = root->next;
+ }
+
+ return ret;
+}
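+// Typical usage (see ebpf_process_send_apps_data below) passes the offset of the
+// member that should be aggregated for the target, e.g.:
+//
+//     ebpf_process_sum_values_for_pids(w->root_pid,
+//                                      offsetof(ebpf_process_publish_apps_t, publish_open));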
+
+/**
+ * Remove process pid
+ *
+ * Remove a PID from the task table when release_task was called.
+ */
+void ebpf_process_remove_pids()
+{
+ struct pid_stat *pids = root_of_pids;
+ int pid_fd = map_fd[0];
+ while (pids) {
+ uint32_t pid = pids->pid;
+ ebpf_process_stat_t *w = global_process_stats[pid];
+ if (w) {
+ if (w->removeme) {
+ freez(w);
+ global_process_stats[pid] = NULL;
+ bpf_map_delete_elem(pid_fd, &pid);
+ }
+ }
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+ */
+void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ collected_number value;
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_open));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_open_error));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_closed));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_close_error));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_deleted));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_write_call));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_write_error));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_read_call));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_read_error));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_write_bytes));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(
+ w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_read_bytes));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_process));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value =
+ ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_thread));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, publish_task));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ ebpf_process_remove_pids();
+}
+
+/*****************************************************************
+ *
+ * READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+ uint64_t idx;
+ netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
+
+ netdata_idx_t *val = process_hash_values;
+ for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
+ if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
+ uint64_t total = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
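+ // Before kernel 4.15 the table exposes a single slot per index; on newer
+ // kernels the maps are per CPU, so the values are summed across all CPUs.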
+ for (i = 0; i < end; i++)
+ total += val[i];
+
+ res[idx] = total;
+ } else {
+ res[idx] = 0;
+ }
+ }
+
+ process_aggregated_data[0].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
+ process_aggregated_data[1].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
+ process_aggregated_data[2].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
+ process_aggregated_data[3].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
+ process_aggregated_data[4].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
+ process_aggregated_data[5].call = res[NETDATA_KEY_CALLS_DO_EXIT];
+ process_aggregated_data[6].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
+ process_aggregated_data[7].call = res[NETDATA_KEY_CALLS_DO_FORK];
+ process_aggregated_data[8].call = res[NETDATA_KEY_CALLS_SYS_CLONE];
+
+ process_aggregated_data[0].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
+ process_aggregated_data[1].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
+ process_aggregated_data[2].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
+ process_aggregated_data[3].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
+ process_aggregated_data[4].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
+ process_aggregated_data[7].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
+ process_aggregated_data[8].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
+
+ process_aggregated_data[2].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
+ process_aggregated_data[3].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
+}
+
+/**
+ * Update the apps data reading the per-PID values collected from the kernel.
+ */
+static void ebpf_process_update_apps_data()
+{
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ uint32_t current_pid = pids->pid;
+ ebpf_process_stat_t *ps = global_process_stats[current_pid];
+ if (!ps) {
+ pids = pids->next;
+ continue;
+ }
+
+ ebpf_process_publish_apps_t *cad = current_apps_data[current_pid];
+ ebpf_process_publish_apps_t *pad = prev_apps_data[current_pid];
+ int lstatus;
+ if (!cad) {
+ ebpf_process_publish_apps_t *ptr = callocz(2, sizeof(ebpf_process_publish_apps_t));
+ cad = &ptr[0];
+ current_apps_data[current_pid] = cad;
+ pad = &ptr[1];
+ prev_apps_data[current_pid] = pad;
+ lstatus = 1;
+ } else {
+ memcpy(pad, cad, sizeof(ebpf_process_publish_apps_t));
+ lstatus = 0;
+ }
+
+ //Read data
+ cad->call_sys_open = ps->open_call;
+ cad->call_close_fd = ps->close_call;
+ cad->call_vfs_unlink = ps->unlink_call;
+ cad->call_read = ps->read_call + ps->readv_call;
+ cad->call_write = ps->write_call + ps->writev_call;
+ cad->call_do_exit = ps->exit_call;
+ cad->call_release_task = ps->release_call;
+ cad->call_do_fork = ps->fork_call;
+ cad->call_sys_clone = ps->clone_call;
+
+ cad->ecall_sys_open = ps->open_err;
+ cad->ecall_close_fd = ps->close_err;
+ cad->ecall_vfs_unlink = ps->unlink_err;
+ cad->ecall_read = ps->read_err + ps->readv_err;
+ cad->ecall_write = ps->write_err + ps->writev_err;
+ cad->ecall_do_fork = ps->fork_err;
+ cad->ecall_sys_clone = ps->clone_err;
+
+ cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->writev_bytes;
+ cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes;
+
+ ebpf_process_update_apps_publish(cad, pad, lstatus);
+
+ pids = pids->next;
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create IO chart
+ *
+ * @param family the chart family
+ * @param name the chart name
+ * @param axis the axis label
+ * @param web the group name used to attach the chart on dashboard
+ * @param order the order number of the specified chart
+ */
+static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order)
+{
+ printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
+ family,
+ name,
+ axis,
+ web,
+ order,
+ update_every);
+
+ printf("DIMENSION %s %s absolute 1 1\n", process_id_names[3], NETDATA_VFS_DIM_OUT_FILE_BYTES);
+ printf("DIMENSION %s %s absolute 1 1\n", process_id_names[4], NETDATA_VFS_DIM_IN_FILE_BYTES);
+}
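+// For illustration, assuming NETDATA_EBPF_FAMILY expands to "ebpf", the axis
+// label to "bytes/s", the group to "vfs" and update_every to 1, the io_bytes
+// call in ebpf_create_global_charts would emit lines similar to:
+//
+//     CHART ebpf.io_bytes '' 'Bytes written and read' 'bytes/s' 'vfs' '' line 21004 1
+//     DIMENSION vfs_read read absolute 1 1
+//     DIMENSION vfs_write write absolute 1 1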
+
+/**
+ * Create process status chart
+ *
+ * @param family the chart family
+ * @param name the chart name
+ * @param axis the axis label
+ * @param web the group name used to attach the chart on dashboard
+ * @param order the order number of the specified chart
+ */
+static void ebpf_process_status_chart(char *family, char *name, char *axis, char *web, int order)
+{
+ printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d ''\n",
+ family,
+ name,
+ axis,
+ web,
+ order,
+ update_every);
+
+ printf("DIMENSION %s '' absolute 1 1\n", status[0]);
+ printf("DIMENSION %s '' absolute 1 1\n", status[1]);
+}
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_FILE_OPEN_CLOSE_COUNT,
+ "Open and close calls",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ 21000,
+ ebpf_create_global_dimension,
+ process_publish_aggregated,
+ 2);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_FILE_OPEN_ERR_COUNT,
+ "Open fails",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ 21001,
+ ebpf_create_global_dimension,
+ process_publish_aggregated,
+ 2);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_CLEAN_COUNT,
+ "Remove files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21002,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_DEL_START],
+ 1);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_IO_COUNT,
+ "Calls to IO",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21003,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_IN_START_BYTE],
+ 2);
+
+ ebpf_create_io_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_IO_FILE_BYTES,
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_VFS_GROUP,
+ 21004);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_VFS_FILE_ERR_COUNT,
+ "Fails to write or read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ 21005,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[2],
+ NETDATA_VFS_ERRORS);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_SYSCALL,
+ "Start process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21006,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_PROCESS_START],
+ 2);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_EXIT_SYSCALL,
+ "Exit process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21007,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_EXIT_START],
+ 2);
+
+ ebpf_process_status_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_STATUS_NAME,
+ EBPF_COMMON_DIMENSION_DIFFERENCE,
+ NETDATA_PROCESS_GROUP,
+ 21008);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_PROCESS_ERROR_NAME,
+ "Fails to create process",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ 21009,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_PROCESS_START],
+ 2);
+ }
+}
+
+/**
+ * Create process apps charts
+ *
+ * Call ebpf_create_charts_on_apps to create the charts on the apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param root a pointer for the targets.
+ */
+static void ebpf_process_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
+ "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20061,
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
+ "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20062,
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED,
+ "Files closed",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20063,
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
+ "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ 20064,
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
+ "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20065,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
+ "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20066,
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
+ "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20067,
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
+ "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20068,
+ root);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
+ "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_VFS_GROUP,
+ 20069,
+ root);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
+ "Bytes written on disk",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_APPS_VFS_GROUP,
+ 20070,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
+ "Bytes read from disk",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_APPS_VFS_GROUP,
+ 20071,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
+ "Process started",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20072,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD,
+ "Threads started",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20073,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_CLOSE,
+ "Tasks closed",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_PROCESS_GROUP,
+ 20074,
+ root);
+}
+
+/**
+ * Create apps charts
+ *
+ * Verify the targets and call the functions that create the charts on the apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param root a pointer for the targets.
+ */
+static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ int newly_added = 0;
+
+ for (w = root; w; w = w->next) {
+ if (w->target)
+ continue;
+
+ if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+ struct pid_on_target *pid_on_target;
+
+ fprintf(
+ stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
+ (w->processes == 1) ? "" : "es");
+
+ for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid);
+ }
+
+ fputc('\n', stderr);
+ }
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ if (!newly_added)
+ return;
+
+ if (ebpf_modules[EBPF_MODULE_PROCESS_IDX].apps_charts)
+ ebpf_process_create_apps_charts(em, root);
+
+ if (ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts)
+ ebpf_socket_create_apps_charts(NULL, root);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds used with the heartbeat
+ * @param em the structure with thread information
+ */
+static void process_collector(usec_t step, ebpf_module_t *em)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ int publish_global = em->global_charts;
+ int apps_enabled = em->apps_charts;
+ int pid_fd = map_fd[0];
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_hash_global_tables();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ cleanup_exited_pids();
+ collect_data_for_all_processes(pid_fd);
+
+ ebpf_create_apps_charts(em, apps_groups_root_target);
+
+ pthread_cond_broadcast(&collect_data_cond_var);
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ int publish_apps = 0;
+ if (apps_enabled && all_pids_count > 0) {
+ publish_apps = 1;
+ ebpf_process_update_apps_data();
+ }
+
+ pthread_mutex_lock(&lock);
+ if (publish_global) {
+ ebpf_process_send_data(em);
+ }
+
+ if (publish_apps) {
+ ebpf_process_send_apps_data(em, apps_groups_root_target);
+ }
+ pthread_mutex_unlock(&lock);
+
+ fflush(stdout);
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+void clean_global_memory() {
+ int pid_fd = map_fd[0];
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ uint32_t pid = pids->pid;
+ freez(global_process_stats[pid]);
+
+ bpf_map_delete_elem(pid_fd, &pid);
+ freez(current_apps_data[pid]);
+
+ pids = pids->next;
+ }
+}
+
+void clean_pid_on_target(struct pid_on_target *ptr) {
+ while (ptr) {
+ struct pid_on_target *next = ptr->next;
+ freez(ptr);
+
+ ptr = next;
+ }
+}
+
+void clean_apps_structures(struct target *ptr) {
+ struct target *agdt = ptr;
+ while (agdt) {
+ struct target *next = agdt->next;
+ clean_pid_on_target(agdt->root_pid);
+ freez(agdt);
+
+ agdt = next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_process_cleanup(void *ptr)
+{
+ UNUSED(ptr);
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 200*USEC_PER_MS;
+ while (!finalized_threads) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(process_aggregated_data);
+ freez(process_publish_aggregated);
+ freez(process_hash_values);
+
+ clean_global_memory();
+ freez(global_process_stats);
+ freez(current_apps_data);
+ freez(prev_apps_data);
+
+ clean_apps_structures(apps_groups_root_target);
+ freez(process_data.map_fd);
+
+ struct bpf_program *prog;
+ size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz already does it and shuts
+ * the software down when the allocation is not possible.
+ *
+ * @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_process_allocate_global_vectors(size_t length)
+{
+ process_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+ process_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+ process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+
+ global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
+ current_apps_data = callocz((size_t)pid_max, sizeof(ebpf_process_publish_apps_t *));
+ prev_apps_data = callocz((size_t)pid_max, sizeof(ebpf_process_publish_apps_t *));
+}
+
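+/**
+ * Swap the fork syscall name.
+ *
+ * process_id_names[7] defaults to "_do_fork"; some Red Hat kernels export the
+ * older "do_fork" symbol instead, so the name is replaced before the probes are
+ * attached (see set_local_pointers below).
+ */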
+static void change_syscalls()
+{
+ static char *lfork = "do_fork";
+ process_id_names[7] = lfork;
+}
+
+/**
+ * Set local variables
+ *
+ */
+static void set_local_pointers()
+{
+ map_fd = process_data.map_fd;
+
+ if (process_data.isrh >= NETDATA_MINIMUM_RH_VERSION && process_data.isrh < NETDATA_RH_8)
+ change_syscalls();
+}
+
+/*****************************************************************
+ *
+ * EBPF PROCESS THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Wait for all threads to die
+ *
+ * Disable the process module and wait until every eBPF module reports as
+ * disabled, giving up after ten 200 ms heartbeats.
+ */
+static void wait_for_all_threads_die()
+{
+ ebpf_modules[EBPF_MODULE_PROCESS_IDX].enabled = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ int max = 10;
+ int i;
+ for (i = 0; i < max; i++) {
+ heartbeat_next(&hb, 200000);
+
+ size_t j, counter = 0, compare = 0;
+ for (j = 0; ebpf_modules[j].thread_name; j++) {
+ if (!ebpf_modules[j].enabled)
+ counter++;
+
+ compare++;
+ }
+
+ if (counter == compare)
+ break;
+ }
+}
+
+/**
+ * Process thread
+ *
+ * Thread used to generate process charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_process_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_process_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ process_enabled = em->enabled;
+ fill_ebpf_data(&process_data);
+
+ pthread_mutex_lock(&lock);
+ ebpf_process_allocate_global_vectors(NETDATA_MAX_MONITOR_VECTOR);
+
+ if (ebpf_update_kernel(&process_data)) {
+ pthread_mutex_unlock(&lock);
+ goto endprocess;
+ }
+
+ set_local_pointers();
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, process_data.map_fd);
+ if (!probe_links) {
+ pthread_mutex_unlock(&lock);
+ goto endprocess;
+ }
+
+ ebpf_global_labels(
+ process_aggregated_data, process_publish_aggregated, process_dimension_names, process_id_names,
+ NETDATA_MAX_MONITOR_VECTOR);
+
+ if (process_enabled) {
+ ebpf_create_global_charts(em);
+ }
+
+ pthread_mutex_unlock(&lock);
+
+ process_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+
+endprocess:
+ wait_for_all_threads_die();
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
new file mode 100644
index 000000000..9553434b0
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_PROCESS_H
+#define NETDATA_EBPF_PROCESS_H 1
+
+// Groups used on Dashboard
+#define NETDATA_FILE_GROUP "File"
+#define NETDATA_VFS_GROUP "VFS"
+#define NETDATA_PROCESS_GROUP "Process"
+
+// Internal constants
+#define NETDATA_GLOBAL_VECTOR 24
+#define NETDATA_MAX_MONITOR_VECTOR 9
+#define NETDATA_VFS_ERRORS 3
+
+// Map index
+#define NETDATA_DEL_START 2
+#define NETDATA_IN_START_BYTE 3
+#define NETDATA_EXIT_START 5
+#define NETDATA_PROCESS_START 7
+
+// Global chart name
+#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
+#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
+#define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
+#define NETDATA_VFS_FILE_IO_COUNT "io"
+#define NETDATA_VFS_FILE_ERR_COUNT "io_error"
+
+#define NETDATA_EXIT_SYSCALL "exit"
+#define NETDATA_PROCESS_SYSCALL "process_thread"
+#define NETDATA_PROCESS_ERROR_NAME "task_error"
+#define NETDATA_PROCESS_STATUS_NAME "process_status"
+
+#define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
+#define NETDATA_VFS_DIM_IN_FILE_BYTES "write"
+#define NETDATA_VFS_DIM_OUT_FILE_BYTES "read"
+
+// Charts created on Apps submenu
+#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
+#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
+#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
+#define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create"
+#define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create"
+#define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close"
+
+// Charts created on the Apps submenu only when the return mode is active
+
+#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
+
+// Index from kernel
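+// (each enumerator below is used directly as a key when the global hash table,
+// map_fd[1], is read in read_hash_global_tables of ebpf_process.c)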
+typedef enum ebpf_process_index {
+ NETDATA_KEY_CALLS_DO_SYS_OPEN,
+ NETDATA_KEY_ERROR_DO_SYS_OPEN,
+
+ NETDATA_KEY_CALLS_VFS_WRITE,
+ NETDATA_KEY_ERROR_VFS_WRITE,
+ NETDATA_KEY_BYTES_VFS_WRITE,
+
+ NETDATA_KEY_CALLS_VFS_READ,
+ NETDATA_KEY_ERROR_VFS_READ,
+ NETDATA_KEY_BYTES_VFS_READ,
+
+ NETDATA_KEY_CALLS_VFS_UNLINK,
+ NETDATA_KEY_ERROR_VFS_UNLINK,
+
+ NETDATA_KEY_CALLS_DO_EXIT,
+
+ NETDATA_KEY_CALLS_RELEASE_TASK,
+
+ NETDATA_KEY_CALLS_DO_FORK,
+ NETDATA_KEY_ERROR_DO_FORK,
+
+ NETDATA_KEY_CALLS_CLOSE_FD,
+ NETDATA_KEY_ERROR_CLOSE_FD,
+
+ NETDATA_KEY_CALLS_SYS_CLONE,
+ NETDATA_KEY_ERROR_SYS_CLONE,
+
+ NETDATA_KEY_CALLS_VFS_WRITEV,
+ NETDATA_KEY_ERROR_VFS_WRITEV,
+ NETDATA_KEY_BYTES_VFS_WRITEV,
+
+ NETDATA_KEY_CALLS_VFS_READV,
+ NETDATA_KEY_ERROR_VFS_READV,
+ NETDATA_KEY_BYTES_VFS_READV
+
+} ebpf_process_index_t;
+
+typedef struct ebpf_process_publish_apps {
+ // Number of calls during the last read
+ uint64_t call_sys_open;
+ uint64_t call_close_fd;
+ uint64_t call_vfs_unlink;
+ uint64_t call_read;
+ uint64_t call_write;
+ uint64_t call_do_exit;
+ uint64_t call_release_task;
+ uint64_t call_do_fork;
+ uint64_t call_sys_clone;
+
+ // Number of errors during the last read
+ uint64_t ecall_sys_open;
+ uint64_t ecall_close_fd;
+ uint64_t ecall_vfs_unlink;
+ uint64_t ecall_read;
+ uint64_t ecall_write;
+ uint64_t ecall_do_fork;
+ uint64_t ecall_sys_clone;
+
+ // Number of bytes during the last read
+ uint64_t bytes_written;
+ uint64_t bytes_read;
+
+ // Dimensions sent to chart
+ uint64_t publish_open;
+ uint64_t publish_closed;
+ uint64_t publish_deleted;
+ uint64_t publish_write_call;
+ uint64_t publish_write_bytes;
+ uint64_t publish_read_call;
+ uint64_t publish_read_bytes;
+ uint64_t publish_process;
+ uint64_t publish_thread;
+ uint64_t publish_task;
+ uint64_t publish_open_error;
+ uint64_t publish_close_error;
+ uint64_t publish_write_error;
+ uint64_t publish_read_error;
+} ebpf_process_publish_apps_t;
+
+#endif /* NETDATA_EBPF_PROCESS_H */
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
new file mode 100644
index 000000000..2f73cf4dd
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -0,0 +1,1938 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+
+/*****************************************************************
+ *
+ * GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "sent", "received", "close", "sent",
+ "received", "retransmitted" };
+static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_sendmsg", "tcp_cleanup_rbuf", "tcp_close",
+ "udp_sendmsg", "udp_recvmsg", "tcp_retransmit_skb" };
+
+static netdata_idx_t *socket_hash_values = NULL;
+static netdata_syscall_stat_t *socket_aggregated_data = NULL;
+static netdata_publish_syscall_t *socket_publish_aggregated = NULL;
+
+static ebpf_data_t socket_data;
+
+ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
+ebpf_socket_publish_apps_t **socket_bandwidth_prev = NULL;
+static ebpf_bandwidth_t *bandwidth_vector = NULL;
+
+static int socket_apps_created = 0;
+pthread_mutex_t nv_mutex;
+int wait_to_plot = 0;
+int read_thread_closed = 1;
+
+netdata_vector_plot_t inbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
+netdata_vector_plot_t outbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
+netdata_socket_t *socket_values;
+
+ebpf_network_viewer_port_list_t *listen_ports = NULL;
+
+static int *map_fd = NULL;
+static struct bpf_object *objects = NULL;
+static struct bpf_link **probe_links = NULL;
+
+/*****************************************************************
+ *
+ * PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish the first output structure with independent dimensions
+ * @param tcp structure to store IO from tcp sockets
+ * @param udp structure to store IO from udp sockets
+ * @param input the structure with the input data.
+ */
+static void ebpf_update_global_publish(
+ netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *tcp, netdata_publish_vfs_common_t *udp,
+ netdata_syscall_stat_t *input)
+{
+ netdata_publish_syscall_t *move = publish;
+ while (move) {
+ if (input->call != move->pcall) {
+ // This condition avoids publishing artificially large deltas on the first reading.
+ if (move->pcall) {
+ move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
+ move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
+ move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ move->pcall = input->call;
+ move->pbyte = input->bytes;
+ move->perr = input->ecall;
+ } else {
+ move->ncall = 0;
+ move->nbyte = 0;
+ move->nerr = 0;
+ }
+
+ input = input->next;
+ move = move->next;
+ }
+
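+ // As in ebpf_process.c, sent bytes are negated so that send and receive can
+ // be drawn mirrored on a single bandwidth chart.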
+ tcp->write = -((long)publish[0].nbyte);
+ tcp->read = (long)publish[1].nbyte;
+
+ udp->write = -((long)publish[3].nbyte);
+ udp->read = (long)publish[4].nbyte;
+}
+
+/**
+ * Update Network Viewer plot data
+ *
+ * @param plot the structure where the data will be stored
+ * @param sock the last update from the socket
+ */
+static inline void update_nv_plot_data(netdata_plot_values_t *plot, netdata_socket_t *sock)
+{
+ if (sock->ct > plot->last_time) {
+ plot->last_time = sock->ct;
+ plot->plot_recv_packets = sock->recv_packets;
+ plot->plot_sent_packets = sock->sent_packets;
+ plot->plot_recv_bytes = sock->recv_bytes;
+ plot->plot_sent_bytes = sock->sent_bytes;
+ plot->plot_retransmit = sock->retransmit;
+ }
+
+ sock->recv_packets = 0;
+ sock->sent_packets = 0;
+ sock->recv_bytes = 0;
+ sock->sent_bytes = 0;
+ sock->retransmit = 0;
+}
+
+/**
+ * Calculate Network Viewer Plot
+ *
+ * Do the math on the collected values before plotting the data.
+ */
+static inline void calculate_nv_plot()
+{
+ uint32_t i;
+ uint32_t end = inbound_vectors.next;
+ for (i = 0; i < end; i++) {
+ update_nv_plot_data(&inbound_vectors.plot[i].plot, &inbound_vectors.plot[i].sock);
+ }
+ inbound_vectors.max_plot = end;
+
+ // The 'Other' dimension is always calculated for the chart to have at least one dimension
+ update_nv_plot_data(&inbound_vectors.plot[inbound_vectors.last].plot,
+ &inbound_vectors.plot[inbound_vectors.last].sock);
+
+ end = outbound_vectors.next;
+ for (i = 0; i < end; i++) {
+ update_nv_plot_data(&outbound_vectors.plot[i].plot, &outbound_vectors.plot[i].sock);
+ }
+ outbound_vectors.max_plot = end;
+
+ // The 'Other' dimension is always calculated for the chart to have at least one dimension
+ update_nv_plot_data(&outbound_vectors.plot[outbound_vectors.last].plot,
+ &outbound_vectors.plot[outbound_vectors.last].sock);
+}
+
+/**
+ * Network viewer send bytes
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_bytes(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = ((collected_number) w[i].plot.plot_sent_bytes);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_bytes;
+ write_chart_dimension(w[i].dimension_recv, value);
+ }
+
+ i = ptr->last;
+ value = ((collected_number) w[i].plot.plot_sent_bytes);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_bytes;
+ write_chart_dimension(w[i].dimension_recv, value);
+ write_end_chart();
+}
+
+/**
+ * Network Viewer Send packets
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_packets(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = ((collected_number)w[i].plot.plot_sent_packets);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number) w[i].plot.plot_recv_packets;
+ write_chart_dimension(w[i].dimension_recv, value);
+ }
+
+ i = ptr->last;
+ value = ((collected_number)w[i].plot.plot_sent_packets);
+ write_chart_dimension(w[i].dimension_sent, value);
+ value = (collected_number)w[i].plot.plot_recv_packets;
+ write_chart_dimension(w[i].dimension_recv, value);
+ write_end_chart();
+}
+
+/**
+ * Network Viewer Send Retransmit
+ *
+ * @param ptr the structure with values to plot
+ * @param chart the chart name.
+ */
+static inline void ebpf_socket_nv_send_retransmit(netdata_vector_plot_t *ptr, char *chart)
+{
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ collected_number value;
+
+ write_begin_chart(NETDATA_EBPF_FAMILY, chart);
+ for (i = 0; i < end; i++) {
+ value = (collected_number) w[i].plot.plot_retransmit;
+ write_chart_dimension(w[i].dimension_retransmit, value);
+ }
+
+ i = ptr->last;
+ value = (collected_number)w[i].plot.plot_retransmit;
+ write_chart_dimension(w[i].dimension_retransmit, value);
+ write_end_chart();
+}
+
+/**
+ * Send network viewer data
+ *
+ * @param ptr the pointer to plot data
+ */
+static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr)
+{
+ if (!ptr->flags)
+ return;
+
+ if (ptr == (netdata_vector_plot_t *)&outbound_vectors) {
+ ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_OUTBOUND_BYTES);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_packets(ptr, NETDATA_NV_OUTBOUND_PACKETS);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_retransmit(ptr, NETDATA_NV_OUTBOUND_RETRANSMIT);
+ fflush(stdout);
+ } else {
+ ebpf_socket_nv_send_bytes(ptr, NETDATA_NV_INBOUND_BYTES);
+ fflush(stdout);
+
+ ebpf_socket_nv_send_packets(ptr, NETDATA_NV_INBOUND_PACKETS);
+ fflush(stdout);
+ }
+}
+
+/**
+ * Update the publish structures to create the dimensions
+ *
+ * @param curr Last values read from memory.
+ * @param prev Previous values read from memory.
+ */
+static void ebpf_socket_update_apps_publish(ebpf_socket_publish_apps_t *curr, ebpf_socket_publish_apps_t *prev)
+{
+ curr->publish_received_bytes = curr->bytes_received - prev->bytes_received;
+ curr->publish_sent_bytes = curr->bytes_sent - prev->bytes_sent;
+ curr->publish_tcp_sent = curr->call_tcp_sent - prev->call_tcp_sent;
+ curr->publish_tcp_received = curr->call_tcp_received - prev->call_tcp_received;
+ curr->publish_retransmit = curr->retransmit - prev->retransmit;
+ curr->publish_udp_sent = curr->call_udp_sent - prev->call_udp_sent;
+ curr->publish_udp_received = curr->call_udp_received - prev->call_udp_received;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_socket_send_data(ebpf_module_t *em)
+{
+ netdata_publish_vfs_common_t common_tcp;
+ netdata_publish_vfs_common_t common_udp;
+ ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);
+
+ write_count_chart(
+ NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 3);
+ write_io_chart(
+ NETDATA_TCP_FUNCTION_BYTES, NETDATA_EBPF_FAMILY, socket_id_names[0], socket_id_names[1], &common_tcp);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 2);
+ }
+ write_count_chart(
+ NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_RETRANSMIT_START], 1);
+
+ write_count_chart(
+ NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START], 2);
+ write_io_chart(
+ NETDATA_UDP_FUNCTION_BYTES, NETDATA_EBPF_FAMILY, socket_id_names[3], socket_id_names[4], &common_udp);
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START], 2);
+ }
+}
+
+/**
+ * Sum values for PID
+ *
+ * @param root the structure with all available PIDs
+ * @param offset the offset, inside the publish structure, of the member to sum
+ *
+ * @return It returns the sum of the values for all PIDs
+ */
+long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+{
+ long long ret = 0;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_socket_publish_apps_t *w = socket_bandwidth_curr[pid];
+ if (w) {
+ ret += get_value_from_structure((char *)w, offset);
+ }
+
+ root = root->next;
+ }
+
+ return ret;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+ */
+void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ UNUSED(em);
+ if (!socket_apps_created)
+ return;
+
+ struct target *w;
+ collected_number value;
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_sent_bytes));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_received_bytes));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_tcp_sent));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_tcp_received));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_retransmit));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_udp_sent));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ value = ebpf_socket_sum_values_for_pids(w->root_pid, offsetof(ebpf_socket_publish_apps_t,
+ publish_udp_received));
+ write_chart_dimension(w->name, value);
+ }
+ }
+ write_end_chart();
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_FUNCTION_COUNT,
+ "Calls to internal functions",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21070,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 3);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_FUNCTION_BYTES,
+ "TCP bandwidth",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_SOCKET_GROUP,
+ 21071,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 3);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_FUNCTION_ERROR,
+ "TCP errors",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21072,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated,
+ 2);
+ }
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_TCP_RETRANSMIT,
+ "Packages retransmitted",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21073,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_RETRANSMIT_START],
+ 1);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_UDP_FUNCTION_COUNT,
+ "UDP calls",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21074,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_UDP_FUNCTION_BYTES,
+ "UDP bandwidth",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_SOCKET_GROUP,
+ 21075,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ NETDATA_UDP_FUNCTION_ERROR,
+ "UDP errors",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SOCKET_GROUP,
+ 21076,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_UDP_START],
+ 2);
+ }
+}
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_charts_on_apps to create the charts on the apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param root a pointer for the targets.
+ */
+void ebpf_socket_create_apps_charts(ebpf_module_t *em, struct target *root)
+{
+ UNUSED(em);
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_SENT,
+ "Bytes sent",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_APPS_NET_GROUP,
+ 20080,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_RECV,
+ "bytes received",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_APPS_NET_GROUP,
+ 20081,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
+ "Calls for tcp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20082,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
+ "Calls for tcp_cleanup_rbuf",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20083,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
+ "Calls for tcp_retransmit",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20084,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
+ "Calls for udp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20085,
+ root);
+
+ ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
+ "Calls for udp_recvmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ 20086,
+ root);
+
+ socket_apps_created = 1;
+}
+
+/**
+ * Create network viewer chart
+ *
+ * Create common charts.
+ *
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on dashboard
+ * @param order the chart order
+ * @param ptr the plot structure with values.
+ */
+static void ebpf_socket_create_nv_chart(char *id, char *title, char *units,
+ char *family, int order, netdata_vector_plot_t *ptr)
+{
+ ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
+ id,
+ title,
+ units,
+ family,
+ "stacked",
+ order);
+
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ for (i = 0; i < end; i++) {
+ fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[i].dimension_sent);
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_recv);
+ }
+
+ end = ptr->last;
+ fprintf(stdout, "DIMENSION %s '' incremental -1 1\n", w[end].dimension_sent);
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_recv);
+}
+
+/**
+ * Create network viewer retransmit
+ *
+ * Create a specific chart.
+ *
+ * @param id the chart id
+ * @param title the chart title
+ * @param units the units label
+ * @param family the group name used to attach the chart on dashboard
+ * @param order the chart order
+ * @param ptr the plot structure with values.
+ */
+static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units,
+ char *family, int order, netdata_vector_plot_t *ptr)
+{
+ ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
+ id,
+ title,
+ units,
+ family,
+ "stacked",
+ order);
+
+ uint32_t i;
+ uint32_t end = ptr->last_plot;
+ netdata_socket_plot_t *w = ptr->plot;
+ for (i = 0; i < end; i++) {
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[i].dimension_retransmit);
+ }
+
+ end = ptr->last;
+ fprintf(stdout, "DIMENSION %s '' incremental 1 1\n", w[end].dimension_retransmit);
+}
+
+/**
+ * Create Network Viewer charts
+ *
+ * Recreate the charts when new sockets are created.
+ *
+ * @param ptr a pointer for inbound or outbound vectors.
+ */
+static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr)
+{
+ // We do not have new sockets, so we do not need to move forward
+ if (ptr->max_plot == ptr->last_plot)
+ return;
+
+ ptr->last_plot = ptr->max_plot;
+
+ if (ptr == (netdata_vector_plot_t *)&outbound_vectors) {
+ ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_BYTES,
+ "Outbound connections (bytes).",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21080,
+ ptr);
+
+ ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_PACKETS,
+ "Outbound connections (packets)",
+ EBPF_COMMON_DIMENSION_PACKETS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21082,
+ ptr);
+
+ ebpf_socket_create_nv_retransmit(NETDATA_NV_OUTBOUND_RETRANSMIT,
+ "Retransmitted packets",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21083,
+ ptr);
+ } else {
+ ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_BYTES,
+ "Inbound connections (bytes)",
+ EBPF_COMMON_DIMENSION_BYTESS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21084,
+ ptr);
+
+ ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_PACKETS,
+ "Inbound connections (packets)",
+ EBPF_COMMON_DIMENSION_PACKETS,
+ NETDATA_NETWORK_CONNECTIONS_GROUP,
+ 21085,
+ ptr);
+ }
+
+ ptr->flags |= NETWORK_VIEWER_CHARTS_CREATED;
+}
+
+/*****************************************************************
+ *
+ * READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Is a specific IP inside the range?
+ *
+ * Check if the IP is inside an IP range previously defined.
+ *
+ * @param cmp the IP to compare
+ * @param family the IP family
+ *
+ * @return It returns 1 if the IP is inside the range and 0 otherwise
+ */
+static int is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
+{
+ if (!network_viewer_opt.excluded_ips && !network_viewer_opt.included_ips)
+ return 1;
+
+ uint32_t ipv4_test = ntohl(cmp->addr32[0]);
+ ebpf_network_viewer_ip_list_t *move = network_viewer_opt.excluded_ips;
+ while (move) {
+ if (family == AF_INET) {
+ if (ntohl(move->first.addr32[0]) <= ipv4_test &&
+ ipv4_test <= ntohl(move->last.addr32[0]) )
+ return 0;
+ } else {
+ if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 0;
+ }
+ }
+ move = move->next;
+ }
+
+ move = network_viewer_opt.included_ips;
+ while (move) {
+ if (family == AF_INET) {
+ if (ntohl(move->first.addr32[0]) <= ipv4_test &&
+ ntohl(move->last.addr32[0]) >= ipv4_test)
+ return 1;
+ } else {
+ if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
+ return 1;
+ }
+ }
+ move = move->next;
+ }
+
+ return 0;
+}
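+/*
+ * A minimal usage sketch (the address is a hypothetical value): the excluded
+ * list is tested before the included one, so an IP present in both lists is
+ * rejected.
+ *
+ *   union netdata_ip_t addr = { .addr32 = { htonl(0x0A000005) } }; // 10.0.0.5
+ *   if (is_specific_ip_inside_range(&addr, AF_INET)) {
+ *       // the address passed the include/exclude filters
+ *   }
+ */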
+
+/**
+ * Is port inside range
+ *
+ * Verify whether the cmp port is inside the range [first, last].
+ * The port is converted to big endian before it is compared with the stored list values.
+ *
+ * @param cmp the value to compare
+ *
+ * @return It returns 1 when cmp is inside and 0 otherwise.
+ */
+static int is_port_inside_range(uint16_t cmp)
+{
+ // We do not have restrictions for ports.
+ if (!network_viewer_opt.excluded_port && !network_viewer_opt.included_port)
+ return 1;
+
+ // Test if port is excluded
+ ebpf_network_viewer_port_list_t *move = network_viewer_opt.excluded_port;
+ cmp = htons(cmp);
+ while (move) {
+ if (move->cmp_first <= cmp && cmp <= move->cmp_last)
+ return 0;
+
+ move = move->next;
+ }
+
+ // Test if the port is inside allowed range
+ move = network_viewer_opt.included_port;
+ while (move) {
+ if (move->cmp_first <= cmp && cmp <= move->cmp_last)
+ return 1;
+
+ move = move->next;
+ }
+
+ return 0;
+}
+
+/**
+ * Hostname matches pattern
+ *
+ * @param cmp the value to compare
+ *
+ * @return It returns 1 when the value matches and zero otherwise.
+ */
+int hostname_matches_pattern(char *cmp)
+{
+ if (!network_viewer_opt.included_hostnames && !network_viewer_opt.excluded_hostnames)
+ return 1;
+
+ ebpf_network_viewer_hostname_list_t *move = network_viewer_opt.excluded_hostnames;
+ while (move) {
+ if (simple_pattern_matches(move->value_pattern, cmp))
+ return 0;
+
+ move = move->next;
+ }
+
+ move = network_viewer_opt.included_hostnames;
+ while (move) {
+ if (simple_pattern_matches(move->value_pattern, cmp))
+ return 1;
+
+ move = move->next;
+ }
+
+ return 0;
+}
+
+/**
+ * Is socket allowed?
+ *
+ * Compare the destination address and destination port to decide the next steps.
+ *
+ * @param key the socket read from kernel ring
+ * @param family the family used to compare IPs (AF_INET and AF_INET6)
+ *
+ * @return It returns 1 if this socket is inside the ranges and 0 otherwise.
+ */
+int is_socket_allowed(netdata_socket_idx_t *key, int family)
+{
+ if (!is_port_inside_range(key->dport))
+ return 0;
+
+ return is_specific_ip_inside_range(&key->daddr, family);
+}
+
+/**
+ * Compare sockets
+ *
+ * Compare destination address and destination port.
+ * We do not compare the source port, because it is random.
+ * We also do not compare the source address, because inbound and outbound connections are stored in separate AVL trees.
+ *
+ * @param a pointer to netdata_socket_plot
+ * @param b pointer to netdata_socket_plot
+ *
+ * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b.
+ */
+static int compare_sockets(void *a, void *b)
+{
+ struct netdata_socket_plot *val1 = a;
+ struct netdata_socket_plot *val2 = b;
+ int cmp;
+
+    // We do not need to compare the val2 family, because the data inside the hash table is always from the same family
+ if (val1->family == AF_INET) { //IPV4
+ if (val1->flags & NETDATA_INBOUND_DIRECTION) {
+ if (val1->index.sport == val2->index.sport)
+ cmp = 0;
+ else {
+ cmp = (val1->index.sport > val2->index.sport)?1:-1;
+ }
+ } else {
+ cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32[0], &val2->index.daddr.addr32[0], sizeof(uint32_t));
+ }
+ }
+ } else {
+ if (val1->flags & NETDATA_INBOUND_DIRECTION) {
+ if (val1->index.sport == val2->index.sport)
+ cmp = 0;
+ else {
+ cmp = (val1->index.sport > val2->index.sport)?1:-1;
+ }
+ } else {
+ cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32, &val2->index.daddr.addr32, 4*sizeof(uint32_t));
+ }
+ }
+ }
+
+ return cmp;
+}
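+/*
+ * This comparator is handed to the lockable AVL trees that index the plot
+ * vectors; a sketch mirroring the initialization done in ebpf_socket_thread():
+ *
+ *   avl_init_lock(&inbound_vectors.tree, compare_sockets);
+ *   avl_init_lock(&outbound_vectors.tree, compare_sockets);
+ */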
+
+/**
+ * Build dimension name
+ *
+ * Fill the dimension name vector with the values given.
+ *
+ * @param dimname the output vector
+ * @param hostname the hostname for the socket.
+ * @param service_name the service used to connect.
+ * @param proto the protocol used in this connection
+ * @param family is this IPV4(AF_INET) or IPV6(AF_INET6)
+ *
+ * @return it returns the size of the data copied on success and -1 otherwise.
+ */
+static inline int build_outbound_dimension_name(char *dimname, char *hostname, char *service_name,
+ char *proto, int family)
+{
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s:%s_":"%s:%s:[%s]_",
+ service_name, proto,
+ hostname);
+}
+
+/**
+ * Fill inbound dimension name
+ *
+ * Build the dimension name from the input given.
+ *
+ * @param dimname the output vector
+ * @param service_name the service used to connect.
+ * @param proto the protocol used in this connection
+ *
+ * @return it returns the size of the data copied on success and -1 otherwise.
+ */
+static inline int build_inbound_dimension_name(char *dimname, char *service_name, char *proto)
+{
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, "%s:%s_", service_name,
+ proto);
+}
+
+/**
+ * Fill Resolved Name
+ *
+ * Fill the resolved name structure with the value given.
+ * The hostname is the longest field, so when the combined value exceeds the limit it is truncated.
+ *
+ * @param ptr the output vector
+ * @param hostname the hostname resolved or IP.
+ * @param length the length for the hostname.
+ * @param service_name the service name associated to the connection
+ * @param is_outbound is this an outbound connection?
+ */
+static inline void fill_resolved_name(netdata_socket_plot_t *ptr, char *hostname, size_t length,
+ char *service_name, int is_outbound)
+{
+ if (length < NETDATA_MAX_NETWORK_COMBINED_LENGTH)
+ ptr->resolved_name = strdupz(hostname);
+ else {
+ length = NETDATA_MAX_NETWORK_COMBINED_LENGTH;
+ ptr->resolved_name = mallocz( NETDATA_MAX_NETWORK_COMBINED_LENGTH + 1);
+ memcpy(ptr->resolved_name, hostname, length);
+ ptr->resolved_name[length] = '\0';
+ }
+
+ char dimname[CONFIG_MAX_NAME];
+ int size;
+ char *protocol;
+ if (ptr->sock.protocol == IPPROTO_UDP) {
+ protocol = "UDP";
+ } else if (ptr->sock.protocol == IPPROTO_TCP) {
+ protocol = "TCP";
+ } else {
+ protocol = "ALL";
+ }
+
+ if (is_outbound)
+ size = build_outbound_dimension_name(dimname, hostname, service_name, protocol, ptr->family);
+ else
+ size = build_inbound_dimension_name(dimname,service_name, protocol);
+
+ if (size > 0) {
+ strcpy(&dimname[size], "sent");
+ dimname[size + 4] = '\0';
+ ptr->dimension_sent = strdupz(dimname);
+
+ strcpy(&dimname[size], "recv");
+ ptr->dimension_recv = strdupz(dimname);
+
+ dimname[size - 1] = '\0';
+ ptr->dimension_retransmit = strdupz(dimname);
+ }
+}
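+/*
+ * A worked example with hypothetical values: for an outbound IPv4 connection
+ * resolved to hostname "example.com", service "http" and protocol TCP, the
+ * snprintf() formats above produce "http:TCP:example.com_", so the dimensions
+ * become "http:TCP:example.com_sent", "http:TCP:example.com_recv" and
+ * "http:TCP:example.com" (the retransmit name drops the trailing underscore).
+ * Inbound connections omit the hostname: "http:TCP_sent", and so on.
+ */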
+
+/**
+ * Build dimension names
+ *
+ * Fill the name vectors after resolving the addresses.
+ *
+ * @param ptr a pointer to the structure where the values are stored.
+ * @param is_outbound is this an outbound ptr value?
+ *
+ * @return It returns 1 if the name is valid and 0 otherwise.
+ */
+int fill_names(netdata_socket_plot_t *ptr, int is_outbound)
+{
+ char hostname[NI_MAXHOST], service_name[NI_MAXSERV];
+ if (ptr->resolved)
+ return 1;
+
+ int ret;
+ static int resolve_name = -1;
+ static int resolve_service = -1;
+ if (resolve_name == -1)
+ resolve_name = network_viewer_opt.hostname_resolution_enabled;
+
+ if (resolve_service == -1)
+ resolve_service = network_viewer_opt.service_resolution_enabled;
+
+ netdata_socket_idx_t *idx = &ptr->index;
+
+    char *errname = "Not resolved";
+ // Resolve Name
+ if (ptr->family == AF_INET) { //IPV4
+ struct sockaddr_in myaddr;
+ memset(&myaddr, 0 , sizeof(myaddr));
+
+ myaddr.sin_family = ptr->family;
+ if (is_outbound) {
+ myaddr.sin_port = idx->dport;
+ myaddr.sin_addr.s_addr = idx->daddr.addr32[0];
+ } else {
+ myaddr.sin_port = idx->sport;
+ myaddr.sin_addr.s_addr = idx->saddr.addr32[0];
+ }
+
+ ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr, sizeof(myaddr), hostname,
+ sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD);
+
+ if (!ret && !resolve_service) {
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port));
+ }
+
+ if (ret) {
+            // We cannot resolve the name, so we use the IP
+ if (!inet_ntop(AF_INET, &myaddr.sin_addr.s_addr, hostname, NI_MAXHOST)) {
+ strncpy(hostname, errname, 13);
+ }
+
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr.sin_port));
+ ret = 1;
+ }
+ } else { // IPV6
+ struct sockaddr_in6 myaddr6;
+ memset(&myaddr6, 0 , sizeof(myaddr6));
+
+ myaddr6.sin6_family = AF_INET6;
+ if (is_outbound) {
+ myaddr6.sin6_port = idx->dport;
+ memcpy(myaddr6.sin6_addr.s6_addr, idx->daddr.addr8, sizeof(union netdata_ip_t));
+ } else {
+ myaddr6.sin6_port = idx->sport;
+ memcpy(myaddr6.sin6_addr.s6_addr, idx->saddr.addr8, sizeof(union netdata_ip_t));
+ }
+
+ ret = (!resolve_name)?-1:getnameinfo((struct sockaddr *)&myaddr6, sizeof(myaddr6), hostname,
+ sizeof(hostname), service_name, sizeof(service_name), NI_NAMEREQD);
+
+ if (!ret && !resolve_service) {
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr6.sin6_port));
+ }
+
+ if (ret) {
+            // We cannot resolve the name, so we use the IP
+ if (!inet_ntop(AF_INET6, myaddr6.sin6_addr.s6_addr, hostname, NI_MAXHOST)) {
+ strncpy(hostname, errname, 13);
+ }
+
+ snprintf(service_name, sizeof(service_name), "%u", ntohs(myaddr6.sin6_port));
+
+ ret = 1;
+ }
+ }
+
+ fill_resolved_name(ptr, hostname,
+ strlen(hostname) + strlen(service_name)+ NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH,
+ service_name, is_outbound);
+
+ if (resolve_name && !ret)
+ ret = hostname_matches_pattern(hostname);
+
+ ptr->resolved++;
+
+ return ret;
+}
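+/*
+ * The second argument tells fill_names() which end of the connection to
+ * resolve; a sketch of how store_socket_inside_avl() below derives it from
+ * the vector being filled:
+ *
+ *   resolved = fill_names(w, out != (netdata_vector_plot_t *)&inbound_vectors);
+ */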
+
+/**
+ * Fill last Network Viewer Dimension
+ *
+ * Fill the unique dimension that is always plotted.
+ *
+ * @param ptr the pointer for the last dimension
+ * @param is_outbound is this an outbound structure?
+ */
+static void fill_last_nv_dimension(netdata_socket_plot_t *ptr, int is_outbound)
+{
+ char hostname[NI_MAXHOST], service_name[NI_MAXSERV];
+    char *other = "other";
+    // We also copy the terminating NUL bytes to avoid warnings in newer compilers
+ strncpy(hostname, other, 6);
+ strncpy(service_name, other, 6);
+
+ ptr->family = AF_INET;
+ ptr->sock.protocol = 255;
+ ptr->flags = (!is_outbound)?NETDATA_INBOUND_DIRECTION:NETDATA_OUTBOUND_DIRECTION;
+
+ fill_resolved_name(ptr, hostname, 10 + NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH, service_name, is_outbound);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("Last %s dimension added: ID = %u, IP = OTHER, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s",
+ (is_outbound)?"outbound":"inbound", network_viewer_opt.max_dim - 1, ptr->resolved_name,
+ ptr->dimension_recv, ptr->dimension_sent, ptr->dimension_retransmit);
+#endif
+}
+
+/**
+ * Update Socket Data
+ *
+ * Update the socket information with the last collected data
+ *
+ * @param sock the socket structure that accumulates the totals
+ * @param lvalues the values read from the kernel ring
+ */
+static inline void update_socket_data(netdata_socket_t *sock, netdata_socket_t *lvalues)
+{
+ sock->recv_packets += lvalues->recv_packets;
+ sock->sent_packets += lvalues->sent_packets;
+ sock->recv_bytes += lvalues->recv_bytes;
+ sock->sent_bytes += lvalues->sent_bytes;
+ sock->retransmit += lvalues->retransmit;
+
+ if (lvalues->ct > sock->ct)
+ sock->ct = lvalues->ct;
+}
+
+/**
+ * Store socket inside avl
+ *
+ * Store the socket values inside the AVL tree.
+ *
+ * @param out the structure with information used to plot charts.
+ * @param lvalues Values read from socket ring.
+ * @param lindex the index information, the real socket.
+ * @param family the family associated to the socket
+ * @param flags the connection flags
+ */
+static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t *lvalues,
+ netdata_socket_idx_t *lindex, int family, uint32_t flags)
+{
+    netdata_socket_plot_t test, *ret;
+
+ memcpy(&test.index, lindex, sizeof(netdata_socket_idx_t));
+ test.flags = flags;
+
+ ret = (netdata_socket_plot_t *) avl_search_lock(&out->tree, (avl *)&test);
+ if (ret) {
+ if (lvalues->ct > ret->plot.last_time) {
+ update_socket_data(&ret->sock, lvalues);
+ }
+ } else {
+ uint32_t curr = out->next;
+ uint32_t last = out->last;
+
+ netdata_socket_plot_t *w = &out->plot[curr];
+
+ int resolved;
+ if (curr == last) {
+ if (lvalues->ct > w->plot.last_time) {
+ update_socket_data(&w->sock, lvalues);
+ }
+ return;
+ } else {
+ memcpy(&w->sock, lvalues, sizeof(netdata_socket_t));
+ memcpy(&w->index, lindex, sizeof(netdata_socket_idx_t));
+ w->family = family;
+
+ resolved = fill_names(w, out != (netdata_vector_plot_t *)&inbound_vectors);
+ }
+
+ if (!resolved) {
+ freez(w->resolved_name);
+ freez(w->dimension_sent);
+ freez(w->dimension_recv);
+ freez(w->dimension_retransmit);
+
+ memset(w, 0, sizeof(netdata_socket_plot_t));
+
+ return;
+ }
+
+ w->flags = flags;
+        netdata_socket_plot_t *check;
+ check = (netdata_socket_plot_t *) avl_insert_lock(&out->tree, (avl *)w);
+ if (check != w)
+ error("Internal error, cannot insert the AVL tree.");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ char iptext[INET6_ADDRSTRLEN];
+ if (inet_ntop(family, &w->index.daddr.addr8, iptext, sizeof(iptext)))
+ info("New %s dimension added: ID = %u, IP = %s, NAME = %s, DIM1 = %s, DIM2 = %s, DIM3 = %s",
+ (out == &inbound_vectors)?"inbound":"outbound", curr, iptext, w->resolved_name,
+ w->dimension_recv, w->dimension_sent, w->dimension_retransmit);
+#endif
+ curr++;
+ if (curr > last)
+ curr = last;
+ out->next = curr;
+ }
+}
+
+/**
+ * Compare Vector to store
+ *
+ * Compare the input values with the listening ports to select the table where the socket is stored.
+ *
+ * @param direction stores the inbound or outbound direction.
+ * @param cmp the index read from the hash table.
+ * @param proto the protocol read.
+ *
+ * @return It returns the vector where the socket must be stored.
+ */
+netdata_vector_plot_t * select_vector_to_store(uint32_t *direction, netdata_socket_idx_t *cmp, uint8_t proto)
+{
+ if (!listen_ports) {
+ *direction = NETDATA_OUTBOUND_DIRECTION;
+ return &outbound_vectors;
+ }
+
+ ebpf_network_viewer_port_list_t *move_ports = listen_ports;
+ while (move_ports) {
+ if (move_ports->protocol == proto && move_ports->first == cmp->sport) {
+ *direction = NETDATA_INBOUND_DIRECTION;
+ return &inbound_vectors;
+ }
+
+ move_ports = move_ports->next;
+ }
+
+ *direction = NETDATA_OUTBOUND_DIRECTION;
+ return &outbound_vectors;
+}
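+/*
+ * A usage sketch mirroring hash_accumulator() below: the selected table and
+ * the direction it reports are consumed together when the socket is stored.
+ *
+ *   uint32_t dir;
+ *   netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol);
+ *   store_socket_inside_avl(table, &values[0], key, family, dir);
+ */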
+
+/**
+ * Hash accumulator
+ *
+ * @param values the values used to calculate the data.
+ * @param key the key to store data.
+ * @param removesock signals whether this socket must be removed.
+ * @param family the connection family
+ * @param end the number of values stored per key (one per processor on per-CPU tables).
+ */
+static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key, int *removesock, int family, int end)
+{
+ uint64_t bsent = 0, brecv = 0, psent = 0, precv = 0;
+ uint16_t retransmit = 0;
+ int i;
+ uint8_t protocol = values[0].protocol;
+ uint64_t ct = values[0].ct;
+ for (i = 1; i < end; i++) {
+ netdata_socket_t *w = &values[i];
+
+ precv += w->recv_packets;
+ psent += w->sent_packets;
+ brecv += w->recv_bytes;
+ bsent += w->sent_bytes;
+ retransmit += w->retransmit;
+
+ if (!protocol)
+ protocol = w->protocol;
+
+ if (w->ct > ct)
+ ct = w->ct;
+
+ *removesock += (int)w->removeme;
+ }
+
+ values[0].recv_packets += precv;
+ values[0].sent_packets += psent;
+ values[0].recv_bytes += brecv;
+ values[0].sent_bytes += bsent;
+ values[0].retransmit += retransmit;
+ values[0].removeme += (uint8_t)*removesock;
+ values[0].protocol = (!protocol)?IPPROTO_TCP:protocol;
+ values[0].ct = ct;
+
+ if (is_socket_allowed(key, family)) {
+ uint32_t dir;
+ netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol);
+ store_socket_inside_avl(table, &values[0], key, family, dir);
+ }
+}
+
+/**
+ * Read socket hash table
+ *
+ * Read data from hash tables created on kernel ring.
+ *
+ * @param fd the hash table with data.
+ * @param family the family associated to the hash table
+ * @param network_connection whether the network connection charts are enabled
+ */
+static void read_socket_hash_table(int fd, int family, int network_connection)
+{
+ if (wait_to_plot)
+ return;
+
+ netdata_socket_idx_t key = {};
+ netdata_socket_idx_t next_key;
+ netdata_socket_idx_t removeme;
+ int removesock = 0;
+
+ netdata_socket_t *values = socket_values;
+ size_t length = ebpf_nprocs*sizeof(netdata_socket_t);
+ int test, end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+        // We need to reset the values when we are working on kernel 4.15 or newer, because the kernel does not create
+        // values for a specific processor unless it is used to store data. As a result of this behavior, the next
+        // socket can have values left over from the previous one.
+ memset(values, 0, length);
+ test = bpf_map_lookup_elem(fd, &key, values);
+ if (test < 0) {
+ key = next_key;
+ continue;
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &removeme);
+
+ if (network_connection) {
+ removesock = 0;
+ hash_accumulator(values, &key, &removesock, family, end);
+ }
+
+ if (removesock)
+ removeme = key;
+
+ key = next_key;
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &removeme);
+
+ test = bpf_map_lookup_elem(fd, &next_key, values);
+ if (test < 0) {
+ return;
+ }
+
+ if (network_connection) {
+ removesock = 0;
+ hash_accumulator(values, &next_key, &removesock, family, end);
+ }
+
+ if (removesock)
+ bpf_map_delete_elem(fd, &next_key);
+}
+
+/**
+ * Update listen table
+ *
+ * Update the linked list when necessary.
+ *
+ * @param value the port we are listening on.
+ * @param proto the protocol used with the port connection.
+ */
+void update_listen_table(uint16_t value, uint8_t proto)
+{
+ ebpf_network_viewer_port_list_t *w;
+ if (likely(listen_ports)) {
+ ebpf_network_viewer_port_list_t *move = listen_ports, *store = listen_ports;
+ while (move) {
+ if (move->protocol == proto && move->first == value)
+ return;
+
+ store = move;
+ move = move->next;
+ }
+
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->first = value;
+ w->protocol = proto;
+ store->next = w;
+ } else {
+ w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
+ w->first = value;
+ w->protocol = proto;
+
+ listen_ports = w;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("The network viewer is monitoring inbound connections for port %u", ntohs(value));
+#endif
+}
+
+/**
+ * Read listen table
+ *
+ * Read the table with all the ports the host is listening on.
+ */
+static void read_listen_table()
+{
+ uint16_t key = 0;
+ uint16_t next_key;
+
+ int fd = map_fd[NETDATA_SOCKET_LISTEN_TABLE];
+ uint8_t value;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ int test = bpf_map_lookup_elem(fd, &key, &value);
+ if (test < 0) {
+ key = next_key;
+ continue;
+ }
+
+ // The correct protocol must come from kernel
+ update_listen_table(htons(key), (key == 53)?IPPROTO_UDP:IPPROTO_TCP);
+
+ key = next_key;
+ }
+
+ if (next_key) {
+ // The correct protocol must come from kernel
+        update_listen_table(htons(next_key), (next_key == 53)?IPPROTO_UDP:IPPROTO_TCP);
+ }
+}
+
+/**
+ * Socket read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot freeze the whole plugin to read the data from very busy sockets.
+ *
+ * @param ptr a pointer to the `ebpf_module_t` structure for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_socket_read_hash(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ read_thread_closed = 0;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = NETDATA_SOCKET_READ_SLEEP_MS;
+ int fd_ipv4 = map_fd[NETDATA_SOCKET_IPV4_HASH_TABLE];
+ int fd_ipv6 = map_fd[NETDATA_SOCKET_IPV6_HASH_TABLE];
+ int network_connection = em->optional;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ pthread_mutex_lock(&nv_mutex);
+ read_listen_table();
+ read_socket_hash_table(fd_ipv4, AF_INET, network_connection);
+ read_socket_hash_table(fd_ipv6, AF_INET6, network_connection);
+ wait_to_plot = 1;
+ pthread_mutex_unlock(&nv_mutex);
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+ uint64_t idx;
+ netdata_idx_t res[NETDATA_SOCKET_COUNTER];
+
+ netdata_idx_t *val = socket_hash_values;
+ int fd = map_fd[NETDATA_SOCKET_GLOBAL_HASH_TABLE];
+ for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, val)) {
+ uint64_t total = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (i = 0; i < end; i++)
+ total += val[i];
+
+ res[idx] = total;
+ } else {
+ res[idx] = 0;
+ }
+ }
+
+ socket_aggregated_data[0].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
+ socket_aggregated_data[1].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[2].call = res[NETDATA_KEY_CALLS_TCP_CLOSE];
+ socket_aggregated_data[3].call = res[NETDATA_KEY_CALLS_UDP_RECVMSG];
+ socket_aggregated_data[4].call = res[NETDATA_KEY_CALLS_UDP_SENDMSG];
+ socket_aggregated_data[5].call = res[NETDATA_KEY_TCP_RETRANSMIT];
+
+ socket_aggregated_data[0].ecall = res[NETDATA_KEY_ERROR_TCP_SENDMSG];
+ socket_aggregated_data[1].ecall = res[NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[3].ecall = res[NETDATA_KEY_ERROR_UDP_RECVMSG];
+ socket_aggregated_data[4].ecall = res[NETDATA_KEY_ERROR_UDP_SENDMSG];
+
+ socket_aggregated_data[0].bytes = res[NETDATA_KEY_BYTES_TCP_SENDMSG];
+ socket_aggregated_data[1].bytes = res[NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF];
+ socket_aggregated_data[3].bytes = res[NETDATA_KEY_BYTES_UDP_RECVMSG];
+ socket_aggregated_data[4].bytes = res[NETDATA_KEY_BYTES_UDP_SENDMSG];
+}
+
+/**
+ * Fill publish apps when necessary.
+ *
+ * @param current_pid the PID being updated
+ * @param eb the structure with data read from memory.
+ */
+void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb)
+{
+ ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid];
+ ebpf_socket_publish_apps_t *prev = socket_bandwidth_prev[current_pid];
+ if (!curr) {
+ ebpf_socket_publish_apps_t *ptr = callocz(2, sizeof(ebpf_socket_publish_apps_t));
+ curr = &ptr[0];
+ socket_bandwidth_curr[current_pid] = curr;
+ prev = &ptr[1];
+ socket_bandwidth_prev[current_pid] = prev;
+ } else {
+ memcpy(prev, curr, sizeof(ebpf_socket_publish_apps_t));
+ }
+
+ curr->bytes_sent = eb->bytes_sent;
+ curr->bytes_received = eb->bytes_received;
+ curr->call_tcp_sent = eb->call_tcp_sent;
+ curr->call_tcp_received = eb->call_tcp_received;
+ curr->retransmit = eb->retransmit;
+ curr->call_udp_sent = eb->call_udp_sent;
+ curr->call_udp_received = eb->call_udp_received;
+
+ ebpf_socket_update_apps_publish(curr, prev);
+}
+
+/**
+ * Bandwidth accumulator.
+ *
+ * @param out the vector with the values to sum
+ */
+void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ ebpf_bandwidth_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ ebpf_bandwidth_t *move = &out[i];
+ total->bytes_sent += move->bytes_sent;
+ total->bytes_received += move->bytes_received;
+ total->call_tcp_sent += move->call_tcp_sent;
+ total->call_tcp_received += move->call_tcp_received;
+ total->retransmit += move->retransmit;
+ total->call_udp_sent += move->call_udp_sent;
+ total->call_udp_received += move->call_udp_received;
+ }
+}
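+/*
+ * A sketch of the intended call sequence (taken from
+ * ebpf_socket_update_apps_data() below): after a successful lookup, the
+ * per-CPU rows are folded into row zero before the data is published.
+ *
+ *   if (!bpf_map_lookup_elem(fd, &key, eb)) {
+ *       ebpf_socket_bandwidth_accumulator(eb);
+ *       ebpf_socket_fill_publish_apps(key, eb);
+ *   }
+ */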
+
+/**
+ * Update the apps data, reading the information from the hash table
+ */
+static void ebpf_socket_update_apps_data()
+{
+ int fd = map_fd[NETDATA_SOCKET_APPS_HASH_TABLE];
+ ebpf_bandwidth_t *eb = bandwidth_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, eb)) {
+ pids = pids->next;
+ continue;
+ }
+
+ ebpf_socket_bandwidth_accumulator(eb);
+
+ ebpf_socket_fill_publish_apps(key, eb);
+
+ pids = pids->next;
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+struct netdata_static_thread socket_threads = {"EBPF SOCKET READ",
+ NULL, NULL, 1, NULL,
+ NULL, ebpf_socket_read_hash };
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds used with the heartbeat
+ * @param em the structure with thread information
+ */
+static void socket_collector(usec_t step, ebpf_module_t *em)
+{
+ UNUSED(em);
+ UNUSED(step);
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ socket_threads.thread = mallocz(sizeof(netdata_thread_t));
+
+ netdata_thread_create(socket_threads.thread, socket_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE, ebpf_socket_read_hash, em);
+
+ int socket_apps_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts;
+ int socket_global_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].global_charts;
+ int network_connection = em->optional;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (socket_global_enabled)
+ read_hash_global_tables();
+
+ if (socket_apps_enabled)
+ ebpf_socket_update_apps_data();
+
+ calculate_nv_plot();
+
+ pthread_mutex_lock(&lock);
+ if (socket_global_enabled)
+ ebpf_socket_send_data(em);
+
+ if (socket_apps_enabled)
+ ebpf_socket_send_apps_data(em, apps_groups_root_target);
+
+ fflush(stdout);
+
+ if (network_connection) {
+            // We call fflush many times, because when we have a lot of dimensions
+            // we began to see unexpected output and Netdata closed the plugin.
+ pthread_mutex_lock(&nv_mutex);
+ ebpf_socket_create_nv_charts(&inbound_vectors);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&inbound_vectors);
+
+ ebpf_socket_create_nv_charts(&outbound_vectors);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&outbound_vectors);
+ wait_to_plot = 0;
+ pthread_mutex_unlock(&nv_mutex);
+
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ pthread_mutex_unlock(&lock);
+
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+
+/**
+ * Clean internal socket plot
+ *
+ * Clean all structures allocated with strdupz.
+ *
+ * @param ptr the pointer with addresses to clean.
+ */
+static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr)
+{
+ freez(ptr->dimension_recv);
+ freez(ptr->dimension_sent);
+ freez(ptr->resolved_name);
+ freez(ptr->dimension_retransmit);
+}
+
+/**
+ * Clean socket plot
+ *
+ * Clean the allocated data for inbound and outbound vectors.
+ */
+static void clean_allocated_socket_plot()
+{
+ uint32_t i;
+ uint32_t end = inbound_vectors.last;
+ netdata_socket_plot_t *plot = inbound_vectors.plot;
+ for (i = 0; i < end; i++) {
+ clean_internal_socket_plot(&plot[i]);
+ }
+
+ clean_internal_socket_plot(&plot[inbound_vectors.last]);
+
+ end = outbound_vectors.last;
+ plot = outbound_vectors.plot;
+ for (i = 0; i < end; i++) {
+ clean_internal_socket_plot(&plot[i]);
+ }
+ clean_internal_socket_plot(&plot[outbound_vectors.last]);
+}
+
+/**
+ * Clean network ports allocated during initialization.
+ *
+ * @param ptr a pointer to the link list.
+ */
+static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
+{
+ if (unlikely(!ptr))
+ return;
+
+ while (ptr) {
+ ebpf_network_viewer_port_list_t *next = ptr->next;
+ freez(ptr->value);
+ freez(ptr);
+ ptr = next;
+ }
+}
+
+/**
+ * Clean service names
+ *
+ * Clean the allocated link list that stores names.
+ *
+ * @param names the link list.
+ */
+static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
+{
+ if (unlikely(!names))
+ return;
+
+ while (names) {
+ ebpf_network_viewer_dim_name_t *next = names->next;
+ freez(names->name);
+ freez(names);
+ names = next;
+ }
+}
+
+/**
+ * Clean hostnames
+ *
+ * @param hostnames the hostnames to clean
+ */
+static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
+{
+ if (unlikely(!hostnames))
+ return;
+
+ while (hostnames) {
+ ebpf_network_viewer_hostname_list_t *next = hostnames->next;
+ freez(hostnames->value);
+ simple_pattern_free(hostnames->value_pattern);
+ freez(hostnames);
+ hostnames = next;
+ }
+}
+
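+/**
+ * Clean thread structures
+ *
+ * Free the per-PID bandwidth structures allocated for the apps charts.
+ */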
+void clean_thread_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(socket_bandwidth_curr[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_socket_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 200*USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(socket_aggregated_data);
+ freez(socket_publish_aggregated);
+ freez(socket_hash_values);
+
+ clean_thread_structures();
+ freez(socket_bandwidth_curr);
+ freez(socket_bandwidth_prev);
+ freez(bandwidth_vector);
+
+ freez(socket_values);
+ clean_allocated_socket_plot();
+ freez(inbound_vectors.plot);
+ freez(outbound_vectors.plot);
+
+ clean_port_structure(&listen_ports);
+
+ ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0;
+
+ clean_network_ports(network_viewer_opt.included_port);
+ clean_network_ports(network_viewer_opt.excluded_port);
+ clean_service_names(network_viewer_opt.names);
+ clean_hostnames(network_viewer_opt.included_hostnames);
+ clean_hostnames(network_viewer_opt.excluded_hostnames);
+
+ pthread_mutex_destroy(&nv_mutex);
+ freez(socket_data.map_fd);
+
+ freez(socket_threads.thread);
+
+ struct bpf_program *prog;
+    size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ finalized_threads = 1;
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz already does this and shuts down the software
+ * when it is not possible to allocate.
+ *
+ * @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_socket_allocate_global_vectors(size_t length)
+{
+ socket_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+ socket_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+ socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+
+ socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
+ socket_bandwidth_prev = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
+ bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
+
+ socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
+ inbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+ outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+}
+
+/**
+ * Set local function pointers; this function will never be compiled with static libraries.
+ */
+static void set_local_pointers()
+{
+ map_fd = socket_data.map_fd;
+}
+
+/**
+ * Initialize Inbound and Outbound
+ *
+ * Initialize the common outbound and inbound sockets.
+ */
+static void initialize_inbound_outbound()
+{
+ inbound_vectors.last = network_viewer_opt.max_dim - 1;
+ outbound_vectors.last = inbound_vectors.last;
+ fill_last_nv_dimension(&inbound_vectors.plot[inbound_vectors.last], 0);
+ fill_last_nv_dimension(&outbound_vectors.plot[outbound_vectors.last], 1);
+}
+
+/*****************************************************************
+ *
+ * EBPF SOCKET THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Socket thread
+ *
+ * Thread used to generate socket charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_socket_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr);
+
+ avl_init_lock(&inbound_vectors.tree, compare_sockets);
+ avl_init_lock(&outbound_vectors.tree, compare_sockets);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ fill_ebpf_data(&socket_data);
+
+ if (!em->enabled)
+ goto endsocket;
+
+ if (pthread_mutex_init(&nv_mutex, NULL)) {
+ error("Cannot initialize local mutex");
+ goto endsocket;
+ }
+ pthread_mutex_lock(&lock);
+
+ ebpf_socket_allocate_global_vectors(NETDATA_MAX_SOCKET_VECTOR);
+ initialize_inbound_outbound();
+
+ if (ebpf_update_kernel(&socket_data)) {
+ pthread_mutex_unlock(&lock);
+ goto endsocket;
+ }
+
+ set_local_pointers();
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, socket_data.map_fd);
+ if (!probe_links) {
+ pthread_mutex_unlock(&lock);
+ goto endsocket;
+ }
+
+ ebpf_global_labels(
+ socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names,
+ NETDATA_MAX_SOCKET_VECTOR);
+
+ ebpf_create_global_charts(em);
+
+ finalized_threads = 0;
+ pthread_mutex_unlock(&lock);
+
+ socket_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+
+endsocket:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
new file mode 100644
index 000000000..0e19f80e8
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_socket.h
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+#ifndef NETDATA_EBPF_SOCKET_H
+#define NETDATA_EBPF_SOCKET_H 1
+#include <stdint.h>
+#include "libnetdata/avl/avl.h"
+
+// Vector indexes
+#define NETDATA_MAX_SOCKET_VECTOR 6
+#define NETDATA_UDP_START 3
+#define NETDATA_RETRANSMIT_START 5
+
+#define NETDATA_SOCKET_APPS_HASH_TABLE 0
+#define NETDATA_SOCKET_IPV4_HASH_TABLE 1
+#define NETDATA_SOCKET_IPV6_HASH_TABLE 2
+#define NETDATA_SOCKET_GLOBAL_HASH_TABLE 4
+#define NETDATA_SOCKET_LISTEN_TABLE 5
+
+#define NETDATA_SOCKET_READ_SLEEP_MS 800000ULL
+
+typedef enum ebpf_socket_idx {
+ NETDATA_KEY_CALLS_TCP_SENDMSG,
+ NETDATA_KEY_ERROR_TCP_SENDMSG,
+ NETDATA_KEY_BYTES_TCP_SENDMSG,
+
+ NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF,
+ NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF,
+ NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF,
+
+ NETDATA_KEY_CALLS_TCP_CLOSE,
+
+ NETDATA_KEY_CALLS_UDP_RECVMSG,
+ NETDATA_KEY_ERROR_UDP_RECVMSG,
+ NETDATA_KEY_BYTES_UDP_RECVMSG,
+
+ NETDATA_KEY_CALLS_UDP_SENDMSG,
+ NETDATA_KEY_ERROR_UDP_SENDMSG,
+ NETDATA_KEY_BYTES_UDP_SENDMSG,
+
+ NETDATA_KEY_TCP_RETRANSMIT,
+
+ NETDATA_SOCKET_COUNTER
+} ebpf_socket_index_t;
+
+#define NETDATA_SOCKET_GROUP "Socket"
+#define NETDATA_NETWORK_CONNECTIONS_GROUP "Network connections"
+
+// Global chart name
+#define NETDATA_TCP_FUNCTION_COUNT "tcp_functions"
+#define NETDATA_TCP_FUNCTION_BYTES "tcp_bandwidth"
+#define NETDATA_TCP_FUNCTION_ERROR "tcp_error"
+#define NETDATA_TCP_RETRANSMIT "tcp_retransmit"
+#define NETDATA_UDP_FUNCTION_COUNT "udp_functions"
+#define NETDATA_UDP_FUNCTION_BYTES "udp_bandwidth"
+#define NETDATA_UDP_FUNCTION_ERROR "udp_error"
+
+// Charts created on Apps submenu
+#define NETDATA_NET_APPS_BANDWIDTH_SENT "bandwidth_sent"
+#define NETDATA_NET_APPS_BANDWIDTH_RECV "bandwidth_recv"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS "bandwidth_tcp_send"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS "bandwidth_tcp_recv"
+#define NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT "bandwidth_tcp_retransmit"
+#define NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS "bandwidth_udp_send"
+#define NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS "bandwidth_udp_recv"
+
+// Network viewer charts
+#define NETDATA_NV_OUTBOUND_BYTES "outbound_bytes"
+#define NETDATA_NV_OUTBOUND_PACKETS "outbound_packets"
+#define NETDATA_NV_OUTBOUND_RETRANSMIT "outbound_retransmit"
+#define NETDATA_NV_INBOUND_BYTES "inbound_bytes"
+#define NETDATA_NV_INBOUND_PACKETS "inbound_packets"
+
+// Port range
+#define NETDATA_MINIMUM_PORT_VALUE 1
+#define NETDATA_MAXIMUM_PORT_VALUE 65535
+
+#define NETDATA_MINIMUM_IPV4_CIDR 0
+#define NETDATA_MAXIMUM_IPV4_CIDR 32
+
+typedef struct ebpf_socket_publish_apps {
+ // Data read
+ uint64_t bytes_sent; // Bytes sent
+ uint64_t bytes_received; // Bytes received
+ uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called
+ uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called
+ uint64_t retransmit; // Number of times tcp_retransmit was called
+ uint64_t call_udp_sent; // Number of times udp_sendmsg was called
+ uint64_t call_udp_received; // Number of times udp_recvmsg was called
+
+ // Publish information.
+ uint64_t publish_sent_bytes;
+ uint64_t publish_received_bytes;
+ uint64_t publish_tcp_sent;
+ uint64_t publish_tcp_received;
+ uint64_t publish_retransmit;
+ uint64_t publish_udp_sent;
+ uint64_t publish_udp_received;
+} ebpf_socket_publish_apps_t;
+
+typedef struct ebpf_network_viewer_dimension_names {
+ char *name;
+ uint32_t hash;
+
+ uint16_t port;
+
+ struct ebpf_network_viewer_dimension_names *next;
+} ebpf_network_viewer_dim_name_t ;
+
+typedef struct ebpf_network_viewer_port_list {
+ char *value;
+ uint32_t hash;
+
+ uint16_t first;
+ uint16_t last;
+
+ uint16_t cmp_first;
+ uint16_t cmp_last;
+
+ uint8_t protocol;
+ struct ebpf_network_viewer_port_list *next;
+} ebpf_network_viewer_port_list_t;
+
+/**
+ * Union used to store ip addresses
+ */
+union netdata_ip_t {
+ uint8_t addr8[16];
+ uint16_t addr16[8];
+ uint32_t addr32[4];
+ uint64_t addr64[2];
+};
+
+typedef struct ebpf_network_viewer_ip_list {
+ char *value; // IP value
+ uint32_t hash; // IP hash
+
+ uint8_t ver; // IP version
+
+ union netdata_ip_t first; // The IP address informed
+ union netdata_ip_t last; // The IP address informed
+
+ struct ebpf_network_viewer_ip_list *next;
+} ebpf_network_viewer_ip_list_t;
+
+typedef struct ebpf_network_viewer_hostname_list {
+ char *value; // IP value
+ uint32_t hash; // IP hash
+
+ SIMPLE_PATTERN *value_pattern;
+
+ struct ebpf_network_viewer_hostname_list *next;
+} ebpf_network_viewer_hostname_list_t;
+
+#define NETDATA_NV_CAP_VALUE 50L
+typedef struct ebpf_network_viewer_options {
+ uint32_t max_dim; // Store value read from 'maximum dimensions'
+
+ uint32_t hostname_resolution_enabled;
+ uint32_t service_resolution_enabled;
+
+ ebpf_network_viewer_port_list_t *excluded_port;
+ ebpf_network_viewer_port_list_t *included_port;
+
+ ebpf_network_viewer_dim_name_t *names;
+
+ ebpf_network_viewer_ip_list_t *excluded_ips;
+ ebpf_network_viewer_ip_list_t *included_ips;
+
+ ebpf_network_viewer_hostname_list_t *excluded_hostnames;
+ ebpf_network_viewer_hostname_list_t *included_hostnames;
+
+ ebpf_network_viewer_ip_list_t *ipv4_local_ip;
+ ebpf_network_viewer_ip_list_t *ipv6_local_ip;
+} ebpf_network_viewer_options_t;
+
+extern ebpf_network_viewer_options_t network_viewer_opt;
+
+/**
+ * Structure to store socket information
+ */
+typedef struct netdata_socket {
+ uint64_t recv_packets;
+ uint64_t sent_packets;
+ uint64_t recv_bytes;
+ uint64_t sent_bytes;
+ uint64_t first; // First timestamp
+ uint64_t ct; // Current timestamp
+ uint16_t retransmit; // It is never used with UDP
+ uint8_t protocol;
+ uint8_t removeme;
+ uint32_t reserved;
+} netdata_socket_t __attribute__((__aligned__(8)));
+
+
+typedef struct netdata_plot_values {
+ // Values used in the previous iteration
+ uint64_t recv_packets;
+ uint64_t sent_packets;
+ uint64_t recv_bytes;
+ uint64_t sent_bytes;
+ uint16_t retransmit;
+
+ uint64_t last_time;
+
+ // Values used to plot
+ uint64_t plot_recv_packets;
+ uint64_t plot_sent_packets;
+ uint64_t plot_recv_bytes;
+ uint64_t plot_sent_bytes;
+ uint16_t plot_retransmit;
+} netdata_plot_values_t;
+
+/**
+ * Index used together with the previous structure
+ */
+typedef struct netdata_socket_idx {
+ union netdata_ip_t saddr;
+ uint16_t sport;
+ union netdata_ip_t daddr;
+ uint16_t dport;
+} netdata_socket_idx_t __attribute__((__aligned__(8)));
+
+// The next values were defined according to getnameinfo(3)
+#define NETDATA_MAX_NETWORK_COMBINED_LENGTH 1018
+#define NETDATA_DOTS_PROTOCOL_COMBINED_LENGTH 5 // :TCP:
+#define NETDATA_DIM_LENGTH_WITHOUT_SERVICE_PROTOCOL 979
+
+#define NETDATA_INBOUND_DIRECTION (uint32_t)1
+#define NETDATA_OUTBOUND_DIRECTION (uint32_t)2
+/**
+ * Allocate the maximum number of structures at the beginning; this can force the collector to use more memory
+ * in the long term, but on the other hand it is faster.
+ */
+typedef struct netdata_socket_plot {
+ // Search
+ avl avl;
+ netdata_socket_idx_t index;
+
+ // Current data
+ netdata_socket_t sock;
+
+ // Previous values and values used to write on chart.
+ netdata_plot_values_t plot;
+
+ int family; // AF_INET or AF_INET6
+ char *resolved_name; // Resolve only in the first call
+ unsigned char resolved;
+
+ char *dimension_sent;
+ char *dimension_recv;
+ char *dimension_retransmit;
+
+ uint32_t flags;
+} netdata_socket_plot_t;
+
+#define NETWORK_VIEWER_CHARTS_CREATED (uint32_t)1
+typedef struct netdata_vector_plot {
+ netdata_socket_plot_t *plot; // Vector used to plot charts
+
+ avl_tree_lock tree; // AVL tree to speed up search
+ uint32_t last; // The 'other' dimension, the last chart accepted.
+ uint32_t next; // The next position to store in the vector.
+ uint32_t max_plot; // Max number of elements to plot.
+ uint32_t last_plot; // Last element plot
+
+ uint32_t flags; // Flags
+
+} netdata_vector_plot_t;
+
+extern void clean_port_structure(ebpf_network_viewer_port_list_t **clean);
+extern ebpf_network_viewer_port_list_t *listen_ports;
+extern void update_listen_table(uint16_t value, uint8_t proto);
+
+extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
+extern ebpf_socket_publish_apps_t **socket_bandwidth_prev;
+
+#endif
diff --git a/collectors/ebpf.plugin/reset_netdata_trace.sh.in b/collectors/ebpf.plugin/reset_netdata_trace.sh.in
new file mode 100644
index 000000000..51d981ee3
--- /dev/null
+++ b/collectors/ebpf.plugin/reset_netdata_trace.sh.in
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+KPROBE_FILE="/sys/kernel/debug/tracing/kprobe_events"
+
+DATA="$(grep _netdata_ $KPROBE_FILE| cut -d' ' -f1 | cut -d: -f2)"
+
+for I in $DATA; do
+ echo "-:$I" > $KPROBE_FILE 2>/dev/null;
+done
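+
+# A hedged usage sketch (assumptions: debugfs is mounted at the path above and
+# the script runs with root privileges, since kprobe_events is root-writable):
+#
+#   sudo ./reset_netdata_trace.sh   # remove stale netdata kprobes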
diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in
deleted file mode 100644
index ea4071723..000000000
--- a/collectors/fping.plugin/Makefile.in
+++ /dev/null
@@ -1,646 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/fping.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(libconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- fping.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- fping.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- fping.plugin.in \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- fping.conf \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
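
The deleted `.in` suffix rule above is how `fping.plugin` was generated from `fping.plugin.in`: each `@..._POST@` placeholder is replaced with a configure-time path, and the temporary file is renamed only on success. A one-placeholder sketch of the same substitution (the directory shown is an assumed typical value, not necessarily what `./configure` chose; the real rule substitutes all eight placeholders):

```sh
# stand-alone illustration of the deleted .in rule for a single placeholder;
# write to a temp file first and rename on success, as the rule does
sed -e 's#[@]pluginsdir_POST@#/usr/libexec/netdata/plugins.d#g' \
    fping.plugin.in > fping.plugin.tmp && mv fping.plugin.tmp fping.plugin
```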
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
index ad79f06a6..4aca2a9de 100644
--- a/collectors/fping.plugin/README.md
+++ b/collectors/fping.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "fping.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/fping.plugin/README.md
+-->
+
# fping.plugin
The fping plugin supports monitoring latency, packet loss and uptime of any number of network end points,
@@ -37,7 +42,7 @@ fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
## alarms
Netdata will automatically attach a few alarms for each host.
-Check the [latest versions of the fping alarms](../../health/health.d/fping.conf)
+Check the [latest versions of the fping alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/fping.conf)
## Additional Tips
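
The excerpt above shows only `fping_opts`; for orientation, here is a minimal `/etc/netdata/fping.conf` sketch. The variables are the ones sourced by the plugin script (deleted later in this diff); the host names and binary path are placeholders:

```sh
# /etc/netdata/fping.conf -- sourced by fping.plugin as a shell fragment
fping="/usr/local/bin/fping"   # e.g. the binary built by 'fping.plugin install'
hosts="gw1 dns1 www1"          # placeholder names; map them to IPs in /etc/hosts
update_every=1                 # seconds between data collections
fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
```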
diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin
deleted file mode 100644
index 13145d7a4..000000000
--- a/collectors/fping.plugin/fping.plugin
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This plugin requires a recent version of fping.
-# You can compile it from source, by running me with option: install
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-if [ "${1}" = "install" ]
- then
- [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1
-
- run() {
- printf >&2 " > "
- printf >&2 "%q " "${@}"
- printf >&2 "\n"
- "${@}" || exit 1
- }
-
- download() {
- local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)"
- [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0
-
- local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)"
- [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0
-
- echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1
- }
-
- [ ! -d /usr/src ] && run mkdir -p /usr/src
- [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin
-
- run cd /usr/src
-
- if [ -d fping-4.2 ]
- then
- run rm -rf fping-4.2 || exit 1
- fi
-
- download 'https://github.com/schweikert/fping/releases/download/v4.2/fping-4.2.tar.gz' | run tar -zxvpf -
- [ $? -ne 0 ] && exit 1
- run cd fping-4.2 || exit 1
-
- run ./configure --prefix=/usr/local
- run make clean
- run make
- if [ -f /usr/local/bin/fping ]
- then
- run mv -f /usr/local/bin/fping /usr/local/bin/fping.old
- fi
- run mv src/fping /usr/local/bin/fping
- run chown root:root /usr/local/bin/fping
- run chmod 4755 /usr/local/bin/fping
- echo >&2
- echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping."
- echo >&2
-
- fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)"
- if [ "${fping}" != "/usr/local/bin/fping" ]
- then
- echo >&2 "You have another fping installed at: ${fping}."
- echo >&2 "Please set:"
- echo >&2
- echo >&2 " fping=\"/usr/local/bin/fping\""
- echo >&2
- echo >&2 "at /etc/netdata/fping.conf"
- echo >&2
- fi
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-# store in ${plugin} the name we run under
-# this allows us to copy/link fping.plugin under a different name
-# to have multiple fping plugins running with different settings
-plugin="${PROGRAM_NAME/.plugin/}"
-
-
-# -----------------------------------------------------------------------------
-
-# the frequency to send info to netdata
-# passed by netdata as the first parameter
-update_every="${1-1}"
-
-# the netdata configuration directory
-# passed by netdata as an environment variable
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
-
-# -----------------------------------------------------------------------------
-# configuration options
-# can be overwritten at /etc/netdata/fping.conf
-
-# the fping binary to use
-# we need one that can output netdata-friendly info (supporting: -N)
-# if you have multiple versions, put here the full filename of the right one
-fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )"
-
-# a space separated list of hosts to fping
-# we suggest putting hostnames here and mapping them to IPs in /etc/hosts
-hosts=""
-
-# the time in milliseconds (1 sec = 1000 ms)
-# to ping the hosts - by default 5 pings per host per iteration
-ping_every="$((update_every * 1000 / 5))"
-
-# fping options
-fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
-
-# -----------------------------------------------------------------------------
-# load the configuration files
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- info "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
- [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-if [ -z "${hosts}" ]
-then
- fatal "no hosts configured - nothing to do."
-fi
-
-if [ -z "${fping}" ]
-then
- fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
-fi
-
-if [ ! -x "${fping}" ]
-then
- fatal "fping command '${fping}' is not executable - cannot proceed."
-fi
-
-if [ ${ping_every} -lt 20 ]
- then
- warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms."
- ping_every=20
-fi
-
-# the fping options we will use
-options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} )
-
-# execute fping
-info "starting fping: ${fping} ${options[*]}"
-exec "${fping}" "${options[@]}"
-
-# if we cannot execute fping, stop
-fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in
deleted file mode 100644
index 24a1015d8..000000000
--- a/collectors/freebsd.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/freebsd.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md
index 07da72e16..1b519a659 100644
--- a/collectors/freebsd.plugin/README.md
+++ b/collectors/freebsd.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "freebsd.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/README.md
+-->
+
# freebsd.plugin
Collects resource usage and performance data on FreeBSD systems
diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
index 72fa908ce..1437d08fa 100644
--- a/collectors/freebsd.plugin/freebsd_getifaddrs.c
+++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -144,7 +144,7 @@ int do_getifaddrs(int update_every, usec_t dt) {
(void)dt;
#define DEFAULT_EXLUDED_INTERFACES "lo*"
-#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx*"
+#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re*"
#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
static int enable_new_interfaces = -1;
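
The only change in this hunk is appending `re*` (FreeBSD's Realtek `re(4)` driver) to the default physical-interface patterns, so `re0`, `re1`, ... now get charts by default. The patterns are shell-style globs; a self-contained analogue using POSIX `fnmatch(3)` (netdata actually matches with its own simple-pattern code, so `fnmatch` here is only an illustrative stand-in):

```c
#include <fnmatch.h>
#include <stdio.h>

int main(void) {
    /* "re*" was appended to DEFAULT_PHYSICAL_INTERFACES above */
    const char *pattern = "re*";
    const char *ifaces[] = { "re0", "em0", "lo0" };
    for (int i = 0; i < 3; i++)
        printf("%-3s matches %s: %s\n", ifaces[i], pattern,
               fnmatch(pattern, ifaces[i], 0) == 0 ? "yes" : "no");
    return 0;
}
```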
diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
index a1e50e204..76466c3dd 100644
--- a/collectors/freebsd.plugin/freebsd_ipfw.c
+++ b/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -235,12 +235,12 @@ int do_ipfw(int update_every, usec_t dt) {
if (likely(do_static)) {
sprintf(rule_num_str, "%d_%d", rule->rulenum, rule->id);
- rd_packets = rrddim_find(st_packets, rule_num_str);
+ rd_packets = rrddim_find_active(st_packets, rule_num_str);
if (unlikely(!rd_packets))
rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt);
- rd_bytes = rrddim_find(st_bytes, rule_num_str);
+ rd_bytes = rrddim_find_active(st_bytes, rule_num_str);
if (unlikely(!rd_bytes))
rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt);
@@ -347,12 +347,12 @@ int do_ipfw(int update_every, usec_t dt) {
for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) {
sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num);
- rd_active = rrddim_find(st_active, rule_num_str);
+ rd_active = rrddim_find_active(st_active, rule_num_str);
if (unlikely(!rd_active))
rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules);
- rd_expired = rrddim_find(st_expired, rule_num_str);
+ rd_expired = rrddim_find_active(st_expired, rule_num_str);
if (unlikely(!rd_expired))
rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules);
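
Both `ipfw` hunks (and the `sysctl` and `plugin_freebsd` hunks below) make the same swap: `rrddim_find()` becomes `rrddim_find_active()`, while the surrounding look-up-or-create-then-set idiom stays intact. A self-contained toy model of that idiom follows; every type and function here is a hypothetical stand-in, not netdata's rrd API, and the assumed point of the `_active` variant is that entries already marked obsolete are skipped, so a fresh dimension gets created instead:

```c
#include <stdio.h>
#include <string.h>

typedef struct { char id[32]; int obsolete; long value; } Dim;

static Dim dims[16];
static int ndims = 0;

/* stand-in for rrddim_find_active(): miss on unknown OR obsolete ids */
static Dim *find_active(const char *id) {
    for (int i = 0; i < ndims; i++)
        if (!dims[i].obsolete && strcmp(dims[i].id, id) == 0)
            return &dims[i];
    return NULL;
}

/* stand-in for rrddim_add() */
static Dim *add_dim(const char *id) {
    Dim *d = &dims[ndims++];
    snprintf(d->id, sizeof(d->id), "%s", id);
    d->obsolete = 0;
    d->value = 0;
    return d;
}

int main(void) {
    const char *rule_num_str = "100_1";
    Dim *rd = find_active(rule_num_str);  /* rrddim_find_active(st, ...) */
    if (!rd) rd = add_dim(rule_num_str);  /* rrddim_add(st, ...)         */
    rd->value = 42;                       /* rrddim_set_by_pointer(...)  */
    printf("%s = %ld\n", rd->id, rd->value);
    return 0;
}
```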
diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
index bd9226631..a71ec5604 100644
--- a/collectors/freebsd.plugin/freebsd_sysctl.c
+++ b/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -678,7 +678,7 @@ int do_hw_intcnt(int update_every, usec_t dt) {
p = intrnames + i * (MAXCOMLEN + 1);
if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) {
- RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p);
+ RRDDIM *rd_interrupts = rrddim_find_active(st_interrupts, p);
if (unlikely(!rd_interrupts))
rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c
index 5cde37113..bee8395f5 100644
--- a/collectors/freebsd.plugin/plugin_freebsd.c
+++ b/collectors/freebsd.plugin/plugin_freebsd.c
@@ -129,7 +129,7 @@ void *freebsd_main(void *ptr) {
static RRDSET *st = NULL;
if(unlikely(!st)) {
- st = rrdset_find_bytype_localhost("netdata", "plugin_freebsd_modules");
+ st = rrdset_find_active_bytype_localhost("netdata", "plugin_freebsd_modules");
if(!st) {
st = rrdset_create_localhost(
diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in
deleted file mode 100644
index 456af8ab7..000000000
--- a/collectors/freeipmi.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/freeipmi.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
index 058c2edbc..52945e3c6 100644
--- a/collectors/freeipmi.plugin/README.md
+++ b/collectors/freeipmi.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "freeipmi.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/freeipmi.plugin/README.md
+-->
+
# freeipmi.plugin
Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
@@ -20,7 +25,7 @@ The plugin creates (up to) 8 charts, based on the information collected from IPM
1. number of sensors by state
2. number of events in SEL
-3. Temperatures CELCIUS
+3. Temperatures CELSIUS
4. Temperatures FAHRENHEIT
5. Voltages
6. Currents
@@ -156,13 +161,8 @@ You will get verbose output on what the plugin does.
## kipmi0 CPU usage
-There have been reports that kipmi is showing increased CPU when the IPMI is queried.
-
-[IBM has given a few explanations](http://www-01.ibm.com/support/docview.wss?uid=nas7d580df3d15874988862575fa0050f604).
-
-Check also [this stackexchange post](http://unix.stackexchange.com/questions/74900/kipmi0-eating-up-to-99-8-cpu-on-centos-6-4).
-
-To lower the CPU consumption of the system you can issue this command:
+There have been reports that kipmi shows increased CPU usage when IPMI is queried. To lower the system's CPU
+consumption, you can issue this command:
```sh
echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
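# to persist the setting across reboots (assuming a typical modprobe.d
# layout; the file name below is illustrative, the parameter is ipmi_si's):
echo 'options ipmi_si kipmid_max_busy_us=10' > /etc/modprobe.d/ipmi.conf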
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
index 74274ea21..bd3c533ca 100644
--- a/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -160,7 +160,7 @@ char *sel_config_file = NULL;
static void
_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config)
{
- assert (ipmi_config);
+ fatal_assert(ipmi_config);
ipmi_config->driver_type = driver_type;
ipmi_config->disable_auto_probe = disable_auto_probe;
@@ -1580,7 +1580,7 @@ int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
int parse_inband_driver_type (const char *str)
{
- assert (str);
+ fatal_assert(str);
if (strcasecmp (str, IPMI_PARSE_DEVICE_KCS_STR) == 0)
return (IPMI_MONITORING_DRIVER_TYPE_KCS);
@@ -1604,7 +1604,7 @@ int parse_inband_driver_type (const char *str)
int parse_outofband_driver_type (const char *str)
{
- assert (str);
+ fatal_assert(str);
if (strcasecmp (str, IPMI_PARSE_DEVICE_LAN_STR) == 0)
return (IPMI_MONITORING_PROTOCOL_VERSION_1_5);
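
All three hunks in this file replace libc's `assert()` with netdata's `fatal_assert()`. The macro's definition is not part of this diff, so the following is only a hedged sketch of what such a macro typically does, under the assumption that it reports and terminates even in `NDEBUG` builds (unlike `assert(3)`); netdata's real implementation in libnetdata may log differently:

```c
#include <stdio.h>
#include <stdlib.h>

/* hypothetical fatal_assert(): not compiled out by NDEBUG, reports then exits */
#define fatal_assert(expr) \
    do { \
        if (!(expr)) { \
            fprintf(stderr, "FATAL: assertion '%s' failed at %s:%d\n", \
                    #expr, __FILE__, __LINE__); \
            exit(1); \
        } \
    } while (0)

int main(void) {
    const char *str = "kcs";
    fatal_assert(str);  /* passes; a NULL str would terminate the process */
    return 0;
}
```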
diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in
deleted file mode 100644
index b98fb4c5c..000000000
--- a/collectors/idlejitter.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/idlejitter.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
index d1c2998b0..3703e2ee2 100644
--- a/collectors/idlejitter.plugin/README.md
+++ b/collectors/idlejitter.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "idlejitter.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/idlejitter.plugin/README.md
+-->
+
# idlejitter.plugin
It works like this:
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c
index 3fe3b0306..c59541ecb 100644
--- a/collectors/idlejitter.plugin/plugin_idlejitter.c
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.c
@@ -54,9 +54,9 @@ void *cpuidlejitter_main(void *ptr) {
if(netdata_exit) break;
while(elapsed < update_every_ut) {
- now_monotonic_timeval(&before);
+ now_monotonic_high_precision_timeval(&before);
sleep_usec(sleep_ut);
- now_monotonic_timeval(&after);
+ now_monotonic_high_precision_timeval(&after);
usec_t dt = dt_usec(&after, &before);
elapsed += dt;
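
The hunk above swaps the plugin's timestamp helper for a higher-precision variant around its measurement loop: the plugin requests a fixed sleep, times it with a monotonic clock, and reports the overshoot as idle jitter. A minimal standalone sketch of that idea follows, using plain POSIX clock_gettime(CLOCK_MONOTONIC) in place of netdata's internal now_monotonic_high_precision_timeval() helper; all names and values below are illustrative, not netdata APIs.

```c
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* microseconds from a monotonic clock */
static uint64_t now_usec(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000ULL + (uint64_t)ts.tv_nsec / 1000ULL;
}

int main(void) {
    const uint64_t sleep_ut = 20000; /* request a 20 ms sleep */
    for (int i = 0; i < 5; i++) {
        uint64_t before = now_usec();
        usleep((useconds_t)sleep_ut);
        uint64_t after = now_usec();
        uint64_t elapsed = after - before;
        /* jitter = overshoot beyond the requested sleep (0 if interrupted early) */
        uint64_t jitter = elapsed > sleep_ut ? elapsed - sleep_ut : 0;
        printf("requested %llu us, slept %llu us, jitter %llu us\n",
               (unsigned long long)sleep_ut,
               (unsigned long long)elapsed,
               (unsigned long long)jitter);
    }
    return 0;
}
```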
diff --git a/collectors/ioping.plugin/Makefile.in b/collectors/ioping.plugin/Makefile.in
deleted file mode 100644
index 6d7a3ffeb..000000000
--- a/collectors/ioping.plugin/Makefile.in
+++ /dev/null
@@ -1,646 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/ioping.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(libconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- ioping.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- ioping.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- ioping.plugin.in \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- ioping.conf \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/ioping.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/ioping.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA \
- uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA \
- install-dist_pluginsSCRIPTS install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-man install-pdf \
- install-pdf-am install-ps install-ps-am install-strip \
- installcheck installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md
index 4b18ce490..08b0ce8b1 100644
--- a/collectors/ioping.plugin/README.md
+++ b/collectors/ioping.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "ioping.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ioping.plugin/README.md
+-->
+
# ioping.plugin
The ioping plugin supports monitoring latency for any number of directories/files/devices,
@@ -37,7 +42,7 @@ ioping_opts="-T 1000000 -R"
## alarms
Netdata will automatically attach a few alarms for each host.
-Check the [latest versions of the ioping alarms](../../health/health.d/ioping.conf)
+Check the [latest versions of the ioping alarms](https://raw.githubusercontent.com/netdata/netdata/master/health/health.d/ioping.conf)
## Multiple ioping Plugins With Different Settings
diff --git a/collectors/ioping.plugin/ioping.plugin b/collectors/ioping.plugin/ioping.plugin
deleted file mode 100644
index 648c2e378..000000000
--- a/collectors/ioping.plugin/ioping.plugin
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This plugin requires the latest version of ioping.
-# You can compile it from source by running me with the option: install
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-usage="$(basename "$0") [install] [-h] [-e]
-
-where:
- install install ioping binary
- -e, --env path to environment file (defaults to '/etc/netdata/.environment')
- -h show this help text"
-
-INSTALL=0
-ENVIRONMENT_FILE="/etc/netdata/.environment"
-
-while :; do
- case "$1" in
- -h | --help)
- echo "$usage" >&2
- exit 1
- ;;
- install)
- INSTALL=1
- shift
- ;;
- -e | --env)
- ENVIRONMENT_FILE="$2"
- shift 2
- ;;
- -*)
- echo "$usage" >&2
- exit 1
- ;;
- *) break ;;
- esac
-done
-
-if [ "$INSTALL" == "1" ]
- then
- [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/libexec/netdata/plugins.d/ioping." && exit 1
-
- source "${ENVIRONMENT_FILE}" || exit 1
-
- run() {
- printf >&2 " > "
- printf >&2 "%q " "${@}"
- printf >&2 "\n"
- "${@}" || exit 1
- }
-
- download() {
- local git="$(which git 2>/dev/null || command -v git 2>/dev/null)"
- [ ! -z "${git}" ] && run git clone "${1}" "${2}" && return 0
-
- echo >&2 "Cannot find 'git' in this system." && exit 1
- }
-
- tmp=$(mktemp -d /tmp/netdata-ioping-XXXXXX)
- [ ! -d "${NETDATA_PREFIX}/usr/libexec/netdata" ] && run mkdir -p "${NETDATA_PREFIX}/usr/libexec/netdata"
-
- run cd "${tmp}"
-
- if [ -d ioping-netdata ]
- then
- run rm -rf ioping-netdata || exit 1
- fi
-
- download 'https://github.com/netdata/ioping.git' 'ioping-netdata'
- [ $? -ne 0 ] && exit 1
- run cd ioping-netdata || exit 1
-
- INSTALL_PATH="${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
-
- run make clean
- run make
- run mv ioping "${INSTALL_PATH}"
- run chown root:"${NETDATA_GROUP}" "${INSTALL_PATH}"
- run chmod 4750 "${INSTALL_PATH}"
- echo >&2
- echo >&2 "All done, you have a compatible ioping now at ${INSTALL_PATH}."
- echo >&2
-
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-PROGRAM_NAME="$(basename "${0}")"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-# store in ${plugin} the name we run under
-# this allows us to copy/link ioping.plugin under a different name
-# to have multiple ioping plugins running with different settings
-plugin="${PROGRAM_NAME/.plugin/}"
-
-
-# -----------------------------------------------------------------------------
-
-# the frequency to send info to netdata
-# passed by netdata as the first parameter
-update_every="${1-1}"
-
-# the netdata configuration directory
-# passed by netdata as an environment variable
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
-
-# the netdata directory for internal binaries
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="/usr/libexec/netdata/plugins.d"
-
-# -----------------------------------------------------------------------------
-# configuration options
-# can be overwritten at /etc/netdata/ioping.conf
-
-# the ioping binary to use
-# we need one that can output netdata friendly info (supporting: -N)
-# if you have multiple versions, put here the full filename of the right one
-ioping="${NETDATA_PLUGINS_DIR}/ioping"
-
-# the destination to ioping
-destination=""
-
-# the request size in bytes to ping the disk
-request_size="4k"
-
-# ioping options
-ioping_opts="-T 1000000"
-
-# -----------------------------------------------------------------------------
-# load the configuration files
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"
-do
- if [ -f "${CONFIG}" ]
- then
- info "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
- [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-if [ -z "${destination}" ]
-then
- fatal "destination is not configured - nothing to do."
-fi
-
-if [ ! -f "${ioping}" ]
-then
- fatal "ioping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
-fi
-
-if [ ! -x "${ioping}" ]
-then
- fatal "ioping command '${ioping}' is not executable - cannot proceed."
-fi
-
-# the ioping options we will use
-options=( -N -i ${update_every} -s ${request_size} ${ioping_opts} ${destination} )
-
-# execute ioping
-info "starting ioping: ${ioping} ${options[*]}"
-exec "${ioping}" "${options[@]}"
-
-# if we cannot execute ioping, stop
-fatal "command '${ioping} ${options[*]}' failed to be executed (returned code $?)."
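
The deleted script above sources `${plugin}.conf` from the stock and user configuration directories and expects it to set a few shell variables before it execs ioping. A user override might look like this sketch; the destination path is a hypothetical example (any directory, file, or device works), while the other values mirror the script's own defaults:

```sh
# /etc/netdata/ioping.conf (sketch): sourced by the plugin as a bash file
destination="/var/cache/netdata"  # hypothetical target; set this to what you want to ping
request_size="4k"                 # request size per ping (script default)
ioping_opts="-T 1000000"          # extra ioping options (script default)
```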
diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in
deleted file mode 100644
index b84c31ce7..000000000
--- a/collectors/macos.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/macos.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
index d3fa93698..800eb0e85 100644
--- a/collectors/macos.plugin/README.md
+++ b/collectors/macos.plugin/README.md
@@ -1,6 +1,11 @@
+<!--
+title: "macos.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/macos.plugin/README.md
+-->
+
# macos.plugin
-Collects resource usage and performance data on MacOS systems
+Collects resource usage and performance data on macOS systems
By default, Netdata enables monitoring metrics for disks, memory, and network only when they are not zero; if they are constantly zero, they are ignored. Metrics that start having values after Netdata is started are detected automatically, and their charts are added to the dashboard (a refresh of the dashboard is needed for them to appear, though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
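
As a sketch of the option the paragraph describes (only the section and key named in the text are shown; the surrounding file layout is assumed):

```
# netdata.conf (sketch)
[global]
    enable zero metrics = yes
```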
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
index f253489a5..d0b3e0fd2 100644
--- a/collectors/macos.plugin/macos_fw.c
+++ b/collectors/macos.plugin/macos_fw.c
@@ -145,7 +145,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
total_disk_writes += diskstat.bytes_write;
}
- st = rrdset_find_bytype_localhost("disk", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk"
@@ -183,7 +183,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes);
}
- st = rrdset_find_bytype_localhost("disk_ops", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_ops", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_ops"
@@ -222,7 +222,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write);
}
- st = rrdset_find_bytype_localhost("disk_util", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_util", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_util"
@@ -260,7 +260,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write);
}
- st = rrdset_find_bytype_localhost("disk_iotime", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_iotime", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_iotime"
@@ -297,7 +297,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("disk_await", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_await", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_await"
@@ -328,7 +328,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("disk_avgsz", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_avgsz", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_avgsz"
@@ -359,7 +359,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("disk_svctm", diskstat.name);
+ st = rrdset_find_active_bytype_localhost("disk_svctm", diskstat.name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"disk_svctm"
@@ -401,7 +401,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
}
if (likely(do_io)) {
- st = rrdset_find_bytype_localhost("system", "io");
+ st = rrdset_find_active_bytype_localhost("system", "io");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -453,7 +453,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------------
if (likely(do_space)) {
- st = rrdset_find_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
+ st = rrdset_find_active_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
if (unlikely(!st)) {
snprintfz(title, 4096, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
st = rrdset_create_localhost(
@@ -486,7 +486,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------------
if (likely(do_inodes)) {
- st = rrdset_find_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
+ st = rrdset_find_active_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
if (unlikely(!st)) {
snprintfz(title, 4096, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
st = rrdset_create_localhost(
@@ -533,7 +533,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("net", ifa->ifa_name);
+ st = rrdset_find_active_bytype_localhost("net", ifa->ifa_name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"net"
@@ -561,7 +561,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("net_packets", ifa->ifa_name);
+ st = rrdset_find_active_bytype_localhost("net_packets", ifa->ifa_name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"net_packets"
@@ -594,7 +594,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("net_errors", ifa->ifa_name);
+ st = rrdset_find_active_bytype_localhost("net_errors", ifa->ifa_name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"net_errors"
@@ -623,7 +623,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("net_drops", ifa->ifa_name);
+ st = rrdset_find_active_bytype_localhost("net_drops", ifa->ifa_name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"net_drops"
@@ -650,7 +650,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("net_events", ifa->ifa_name);
+ st = rrdset_find_active_bytype_localhost("net_events", ifa->ifa_name);
if (unlikely(!st)) {
st = rrdset_create_localhost(
"net_events"
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
index 800b2ce56..973b90a20 100644
--- a/collectors/macos.plugin/macos_mach_smi.c
+++ b/collectors/macos.plugin/macos_mach_smi.c
@@ -55,7 +55,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
error("DISABLED: system.cpu");
} else {
- st = rrdset_find_bytype_localhost("system", "cpu");
+ st = rrdset_find_active_bytype_localhost("system", "cpu");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -109,7 +109,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
error("DISABLED: mem.pgfaults");
} else {
if (likely(do_ram)) {
- st = rrdset_find_localhost("system.ram");
+ st = rrdset_find_active_localhost("system.ram");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -156,7 +156,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_swapio)) {
- st = rrdset_find_localhost("system.swapio");
+ st = rrdset_find_active_localhost("system.swapio");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -187,7 +187,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_pgfaults)) {
- st = rrdset_find_localhost("mem.pgfaults");
+ st = rrdset_find_active_localhost("mem.pgfaults");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"mem"
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
index dddafc9f5..84f754185 100644
--- a/collectors/macos.plugin/macos_sysctl.c
+++ b/collectors/macos.plugin/macos_sysctl.c
@@ -230,7 +230,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
error("DISABLED: system.load");
} else {
- st = rrdset_find_bytype_localhost("system", "load");
+ st = rrdset_find_active_bytype_localhost("system", "load");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -270,7 +270,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
do_swap = 0;
error("DISABLED: system.swap");
} else {
- st = rrdset_find_localhost("system.swap");
+ st = rrdset_find_active_localhost("system.swap");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -332,7 +332,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
iftot.ift_obytes += if2m->ifm_data.ifi_obytes;
}
}
- st = rrdset_find_localhost("system.ipv4");
+ st = rrdset_find_active_localhost("system.ipv4");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -382,7 +382,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
error("DISABLED: ipv4.ecnpkts");
} else {
if (likely(do_tcp_packets)) {
- st = rrdset_find_localhost("ipv4.tcppackets");
+ st = rrdset_find_active_localhost("ipv4.tcppackets");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -412,7 +412,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_tcp_errors)) {
- st = rrdset_find_localhost("ipv4.tcperrors");
+ st = rrdset_find_active_localhost("ipv4.tcperrors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -445,7 +445,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_tcp_handshake)) {
- st = rrdset_find_localhost("ipv4.tcphandshake");
+ st = rrdset_find_active_localhost("ipv4.tcphandshake");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -486,7 +486,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
tcpstat.tcps_persistdrop ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpconnaborts");
+ st = rrdset_find_active_localhost("ipv4.tcpconnaborts");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -523,7 +523,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
(tcpstat.tcps_rcvoopack ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_tcpext_ofo = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpofo");
+ st = rrdset_find_active_localhost("ipv4.tcpofo");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -557,7 +557,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.tcpsyncookies");
+ st = rrdset_find_active_localhost("ipv4.tcpsyncookies");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -595,7 +595,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
tcpstat.tcps_ecn_not_supported ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_ecn = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv4.ecnpkts");
+ st = rrdset_find_active_localhost("ipv4.ecnpkts");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -638,7 +638,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
error("DISABLED: ipv4.udperrors");
} else {
if (likely(do_udp_packets)) {
- st = rrdset_find_localhost("ipv4.udppackets");
+ st = rrdset_find_active_localhost("ipv4.udppackets");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -668,7 +668,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_udp_errors)) {
- st = rrdset_find_localhost("ipv4.udperrors");
+ st = rrdset_find_active_localhost("ipv4.udperrors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -729,7 +729,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_icmp_packets)) {
- st = rrdset_find_localhost("ipv4.icmp");
+ st = rrdset_find_active_localhost("ipv4.icmp");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -758,7 +758,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_localhost("ipv4.icmp_errors");
+ st = rrdset_find_active_localhost("ipv4.icmp_errors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -791,7 +791,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_icmpmsg)) {
- st = rrdset_find_localhost("ipv4.icmpmsg");
+ st = rrdset_find_active_localhost("ipv4.icmpmsg");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -840,7 +840,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
error("DISABLED: ipv4.errors");
} else {
if (likely(do_ip_packets)) {
- st = rrdset_find_localhost("ipv4.packets");
+ st = rrdset_find_active_localhost("ipv4.packets");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -874,7 +874,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_ip_fragsout)) {
- st = rrdset_find_localhost("ipv4.fragsout");
+ st = rrdset_find_active_localhost("ipv4.fragsout");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -907,7 +907,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_ip_fragsin)) {
- st = rrdset_find_localhost("ipv4.fragsin");
+ st = rrdset_find_active_localhost("ipv4.fragsin");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -940,7 +940,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
// --------------------------------------------------------------------
if (likely(do_ip_errors)) {
- st = rrdset_find_localhost("ipv4.errors");
+ st = rrdset_find_active_localhost("ipv4.errors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv4"
@@ -1000,7 +1000,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
ip6stat.ip6s_delivered ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_ip6_packets = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.packets");
+ st = rrdset_find_active_localhost("ipv6.packets");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1039,7 +1039,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
ip6stat.ip6s_ofragments ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_ip6_fragsout = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.fragsout");
+ st = rrdset_find_active_localhost("ipv6.fragsout");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1078,7 +1078,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
ip6stat.ip6s_fragments ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_ip6_fragsin = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.fragsin");
+ st = rrdset_find_active_localhost("ipv6.fragsin");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1124,7 +1124,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
ip6stat.ip6s_noroute ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_ip6_errors = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.errors");
+ st = rrdset_find_active_localhost("ipv6.errors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1186,7 +1186,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6_total.msgs_out ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6 = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmp");
+ st = rrdset_find_active_localhost("ipv6.icmp");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1220,7 +1220,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[ND_REDIRECT] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_redir = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmpredir");
+ st = rrdset_find_active_localhost("ipv6.icmpredir");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1263,7 +1263,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_errors = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmperrors");
+ st = rrdset_find_active_localhost("ipv6.icmperrors");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1316,7 +1316,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_echos = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmpechos");
+ st = rrdset_find_active_localhost("ipv6.icmpechos");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1356,7 +1356,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_router = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmprouter");
+ st = rrdset_find_active_localhost("ipv6.icmprouter");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1396,7 +1396,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmpneighbor");
+ st = rrdset_find_active_localhost("ipv6.icmpneighbor");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1442,7 +1442,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
icmp6stat.icp6s_outhist[136] ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
do_icmp6_types = CONFIG_BOOLEAN_YES;
- st = rrdset_find_localhost("ipv6.icmptypes");
+ st = rrdset_find_active_localhost("ipv6.icmptypes");
if (unlikely(!st)) {
st = rrdset_create_localhost(
"ipv6"
@@ -1495,7 +1495,7 @@ int do_macos_sysctl(int update_every, usec_t dt) {
error("DISABLED: system.uptime");
} else {
clock_gettime(CLOCK_REALTIME, &cur_time);
- st = rrdset_find_localhost("system.uptime");
+ st = rrdset_find_active_localhost("system.uptime");
if(unlikely(!st)) {
st = rrdset_create_localhost(
diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in
deleted file mode 100644
index 734983a41..000000000
--- a/collectors/nfacct.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/nfacct.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
index 64bb1f710..1bd8a46e8 100644
--- a/collectors/nfacct.plugin/README.md
+++ b/collectors/nfacct.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "nfacct.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/nfacct.plugin/README.md
+-->
+
# nfacct.plugin
`nfacct.plugin` collects Netfilter statistics.
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
index 21c2e4aee..996070f1c 100644
--- a/collectors/nfacct.plugin/plugin_nfacct.c
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
@@ -762,6 +762,30 @@ static void nfacct_send_metrics() {
#endif // HAVE_LIBNETFILTER_ACCT
+static void nfacct_signal_handler(int signo)
+{
+ exit((signo == SIGPIPE)?1:0);
+}
+
+// When Netdata crashed, this plugin was left behind as an orphaned process;
+// this handler was added so the plugin exits when SIGPIPE and other signals are received.
+void nfacct_signals()
+{
+ int signals[] = { SIGPIPE, SIGINT, SIGTERM, 0};
+ int i;
+ struct sigaction sa;
+ sa.sa_flags = 0;
+ sa.sa_handler = nfacct_signal_handler;
+
+ // ignore all signals while we run in a signal handler
+ sigfillset(&sa.sa_mask);
+
+ for (i = 0; signals[i]; i++) {
+ if(sigaction(signals[i], &sa, NULL) == -1)
+ error("Cannot add the handler to signal %d", signals[i]);
+ }
+}
+
int main(int argc, char **argv) {
// ------------------------------------------------------------------------
@@ -833,6 +857,8 @@ int main(int argc, char **argv) {
error("nfacct.plugin: ignoring parameter '%s'", argv[i]);
}
+ nfacct_signals();
+
errno = 0;
if(freq >= netdata_update_every)
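The handler added above follows the common pattern for external plugins that must not outlive the daemon: once netdata dies, the plugin's next write to its stdout pipe raises SIGPIPE, and the handler turns that into a clean exit instead of leaving a lingering process. A self-contained sketch of the same pattern (not netdata code; the protocol lines are made up):

    /* sketch: an external plugin that exits when its reader goes away */
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void on_signal(int signo) {
        /* SIGPIPE means the collector closed our stdout: exit non-zero.
         * exit() is not strictly async-signal-safe; _exit() would be the
         * conservative choice, but this mirrors the plugin code above. */
        exit(signo == SIGPIPE ? 1 : 0);
    }

    int main(void) {
        struct sigaction sa;
        sa.sa_flags = 0;
        sa.sa_handler = on_signal;
        sigfillset(&sa.sa_mask);          /* block signals inside the handler */
        sigaction(SIGPIPE, &sa, NULL);
        sigaction(SIGINT,  &sa, NULL);
        sigaction(SIGTERM, &sa, NULL);

        for (;;) {                        /* one sample per second */
            printf("BEGIN example.metric\nSET value = 1\nEND\n");
            fflush(stdout);               /* SIGPIPE fires here if the
                                             reader is already gone */
            sleep(1);
        }
    }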
diff --git a/collectors/node.d.plugin/.keep b/collectors/node.d.plugin/.keep
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/node.d.plugin/.keep
+++ /dev/null
diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am
index 411bce9ec..c3142d433 100644
--- a/collectors/node.d.plugin/Makefile.am
+++ b/collectors/node.d.plugin/Makefile.am
@@ -23,7 +23,6 @@ dist_noinst_DATA = \
usernodeconfigdir=$(configdir)/node.d
dist_usernodeconfig_DATA = \
- .keep \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in
deleted file mode 100644
index b057e1eb4..000000000
--- a/collectors/node.d.plugin/Makefile.in
+++ /dev/null
@@ -1,865 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/node.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_libconfig_DATA) $(dist_node_DATA) \
- $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
- $(dist_usernodeconfig_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \
- "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \
- "$(DESTDIR)$(nodemoduleslibberdir)" \
- "$(DESTDIR)$(usernodeconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \
- $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \
- $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \
- $(dist_usernodeconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/fronius/Makefile.inc \
- $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc \
- $(srcdir)/snmp/Makefile.inc \
- $(srcdir)/stiebeleltron/Makefile.inc \
- $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- node.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- node.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- node.d.plugin \
- $(NULL)
-
-# dist_nodeconfig_DATA += fronius/fronius.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += named/named.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += snmp/snmp.conf
-
-# do not install these files, but include them in the distribution
-# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \
- fronius/README.md fronius/Makefile.inc named/README.md \
- named/Makefile.inc sma_webbox/README.md \
- sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \
- stiebeleltron/README.md stiebeleltron/Makefile.inc
-usernodeconfigdir = $(configdir)/node.d
-dist_usernodeconfig_DATA = \
- .keep \
- $(NULL)
-
-nodeconfigdir = $(libconfigdir)/node.d
-dist_nodeconfig_DATA = \
- $(NULL)
-
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-
-# install these files
-dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \
- sma_webbox/sma_webbox.node.js snmp/snmp.node.js \
- stiebeleltron/stiebeleltron.node.js
-nodemodulesdir = $(nodedir)/node_modules
-dist_nodemodules_DATA = \
- node_modules/netdata.js \
- node_modules/extend.js \
- node_modules/pixl-xml.js \
- node_modules/net-snmp.js \
- node_modules/asn1-ber.js \
- $(NULL)
-
-nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber
-dist_nodemoduleslibber_DATA = \
- node_modules/lib/ber/index.js \
- node_modules/lib/ber/errors.js \
- node_modules/lib/ber/reader.js \
- node_modules/lib/ber/types.js \
- node_modules/lib/ber/writer.js \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodeDATA: $(dist_node_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \
- done
-
-uninstall-dist_nodeDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir)
-install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_nodeconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodemodulesDATA: $(dist_nodemodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_nodemodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \
- done
-
-uninstall-dist_nodemoduleslibberDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir)
-install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_usernodeconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \
- install-dist_nodeconfigDATA install-dist_nodemodulesDATA \
- install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
- install-dist_usernodeconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \
- uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemoduleslibberDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_usernodeconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_libconfigDATA \
- install-dist_nodeDATA install-dist_nodeconfigDATA \
- install-dist_nodemodulesDATA \
- install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \
- install-dist_usernodeconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-exec-local install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_libconfigDATA \
- uninstall-dist_nodeDATA uninstall-dist_nodeconfigDATA \
- uninstall-dist_nodemodulesDATA \
- uninstall-dist_nodemoduleslibberDATA \
- uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_usernodeconfigDATA
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
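# Example of the rule above (illustrative; not generated by automake): a line
#   confd="@libconfigdir_POST@"
# in a `.in` script is rewritten at build time to something like
#   confd="/usr/lib/netdata/conf.d"
# since libconfigdir points at the stock configuration directory.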
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(usernodeconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
index 6d7c1f871..8db80d85b 100644
--- a/collectors/node.d.plugin/README.md
+++ b/collectors/node.d.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "node.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/README.md
+-->
+
# node.d.plugin
`node.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
@@ -33,19 +38,18 @@ collectors in node.js. It also manages all its data collectors (placed in `/usr/
instance of node, thus lowering the memory footprint of data collection.
Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`).
-These will have to be developed using the guidelines of **[External Plugins](../plugins.d/)**.
+These will have to be developed using the guidelines of **[External Plugins](/collectors/plugins.d/README.md)**.
To run `node.js` plugins you need to have `node` installed on your system.
On some older systems, the package named `node` is not node.js; it is a terminal emulation program called `ax25-node`.
On those systems the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal opens node.js.
-For more information check the **\[[Installation]]** guide.
## configuring `node.d.plugin`
`node.d.plugin` can work even without any configuration. Its default configuration file is
-[/etc/netdata/node.d.conf](node.d.conf) (to edit it on your system run `/etc/netdata/edit-config node.d.conf`).
+`node.d.conf`. To edit it on your system, run `/etc/netdata/edit-config node.d.conf`.
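
For illustration, a minimal `node.d.conf` is a JSON object that sets the collection frequency and enables or disables individual modules (the module names below are examples; the keys match the options the plugin reads):

```json
{
    "update_every": 1,
    "modules": {
        "named": { "enabled": true },
        "snmp": { "enabled": false }
    }
}
```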
## configuring `node.d.plugin` modules
@@ -227,6 +231,6 @@ The `service` object defines a set of functions to allow you send information to
---
_FIXME: document an operational node.d.plugin data collector - the best example is the
-[snmp collector](snmp/snmp.node.js)_
+[snmp collector](https://raw.githubusercontent.com/netdata/netdata/master/collectors/node.d.plugin/snmp/snmp.node.js)_
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
index f109f7995..746737d0b 100644
--- a/collectors/node.d.plugin/fronius/README.md
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -1,6 +1,12 @@
-# fronius
+<!--
+title: "Fronius Symo monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/fronius/README.md
+sidebar_label: "Fronius Symo"
+-->
-This module collects metrics from the configured solar power installation from Fronius Symo.
+# Fronius Symo monitoring with Netdata
+
+Collects metrics from a configured Fronius Symo solar power installation.
**Requirements**
diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md
index 288292ba8..acd03f687 100644
--- a/collectors/node.d.plugin/named/README.md
+++ b/collectors/node.d.plugin/named/README.md
@@ -1,6 +1,12 @@
-# ISC Bind Statistics
+<!--
+title: "ISC BIND monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/named/README.md
+sidebar_label: "ISC BIND"
+-->
-Using this Netdata collector, you can monitor one or more ISC Bind servers.
+# ISC BIND monitoring with Netdata
+
+Monitor one or more ISC BIND servers.
## Example Netdata charts
diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin
deleted file mode 100644
index 57369d0d8..000000000
--- a/collectors/node.d.plugin/node.d.plugin
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env bash
-':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
-
-// shebang hack from:
-// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
-
-// Initially this is run as a shell script.
-// Then the second line finds nodejs or node in the system path
-// and executes it with the shell parameters.
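// (Editorial note, not in the original: to the shell, ':' is the null command
// and '//' is merely its argument, so the second line's exec replaces the
// shell with node re-running this same file; to JavaScript, ':' is a string
// expression statement and '//' starts a comment, so both interpreters
// accept the file.)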
-
-// netdata
-// real-time performance and health monitoring, done right!
-// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-// --------------------------------------------------------------------------------------------------------------------
-
-'use strict';
-
-// --------------------------------------------------------------------------------------------------------------------
-// get NETDATA environment variables
-
-var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/etc/netdata';
-var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/lib/netdata/conf.d';
-var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
-var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
-
-// make sure the modules are found
-process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
-process.mainModule.paths.unshift(NODE_D_DIR);
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// load required modules
-
-var fs = require('fs');
-var url = require('url');
-var util = require('util');
-var http = require('http');
-var path = require('path');
-var extend = require('extend');
-var netdata = require('netdata');
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// configuration
-
-function netdata_read_json_config_file(module_filename) {
- var f = path.basename(module_filename);
-
- var ufilename, sfilename;
-
- var m = f.match('.plugin' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
- }
-
- m = f.match('.node.js' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
- return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
- dumpError(e);
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
- return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
- dumpError(e);
- }
-
- return {};
-}
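// Example of the lookup order above (added for clarity): for a module file
// "named.node.js" this tries /etc/netdata/node.d/named.conf first, then
// /usr/lib/netdata/conf.d/node.d/named.conf, and falls back to {} (internal
// defaults) if neither file can be read and parsed.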
-
-// internal defaults
-extend(true, netdata.options, {
- filename: path.basename(__filename),
-
- update_every: NETDATA_UPDATE_EVERY,
-
- paths: {
- plugins: NETDATA_PLUGINS_DIR,
- config: NETDATA_USER_CONFIG_DIR,
- stock_config: NETDATA_STOCK_CONFIG_DIR,
- modules: []
- },
-
- modules_enable_autodetect: true,
- modules_enable_all: true,
- modules: {}
-});
-
-// load configuration file
-netdata.options_loaded = netdata_read_json_config_file(__filename);
-extend(true, netdata.options, netdata.options_loaded);
-
-if(!netdata.options.paths.plugins)
- netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
-
-if(!netdata.options.paths.config)
- netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
-
-if(!netdata.options.paths.stock_config)
- netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
-
-// console.error('merged netdata object:');
-// console.error(util.inspect(netdata, {depth: 10}));
-
-
-// apply module paths to node.js process
-function applyModulePaths() {
- var len = netdata.options.paths.modules.length;
- while(len--)
- process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
-}
-applyModulePaths();
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// tracing
-
-function dumpError(err) {
- if (typeof err === 'object') {
- if (err.stack) {
- netdata.debug(err.stack);
- }
- }
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// get command line arguments
-{
- var found_myself = false;
- var found_number = false;
- var found_modules = false;
- process.argv.forEach(function (val, index, array) {
- netdata.debug('PARAM: ' + val);
-
- if(!found_myself) {
- if(val === __filename)
- found_myself = true;
- }
- else {
- switch(val) {
- case 'debug':
- netdata.options.DEBUG = true;
- netdata.debug('DEBUG enabled');
- break;
-
- default:
- if(found_number === true) {
- if(found_modules === false) {
- for(var i in netdata.options.modules)
- netdata.options.modules[i].enabled = false;
- }
-
- if(typeof netdata.options.modules[val] === 'undefined')
- netdata.options.modules[val] = {};
-
- netdata.options.modules[val].enabled = true;
- netdata.options.modules_enable_all = false;
- netdata.debug('enabled module ' + val);
- }
- else {
- try {
- var x = parseInt(val);
- if(x > 0) {
- netdata.options.update_every = x;
- if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
- netdata.options.update_every = NETDATA_UPDATE_EVERY;
- netdata.debug('Update frequency ' + x + 's is too low');
- }
-
- found_number = true;
- netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
- }
- else netdata.error('Ignoring parameter: ' + val);
- }
- catch(e) {
- netdata.error('Cannot get value of parameter: ' + val);
- dumpError(e);
- }
- }
- break;
- }
- }
- });
-}
-
-if(netdata.options.update_every < 1) {
- netdata.debug('Adjusting update frequency to 1 second');
- netdata.options.update_every = 1;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// find modules
-
-function findModules() {
- var found = 0;
-
- var files = fs.readdirSync(NODE_D_DIR);
- var len = files.length;
- while(len--) {
- var m = files[len].match('.node.js' + '$');
- if(m !== null) {
- var n = files[len].substring(0, m.index);
-
- if(typeof(netdata.options.modules[n]) === 'undefined')
- netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
-
- if(netdata.options.modules[n].enabled === true) {
- netdata.options.modules[n].name = n;
- netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
- netdata.options.modules[n].loaded = false;
-
- // load the module
- try {
- netdata.debug('loading module ' + netdata.options.modules[n].filename);
- netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
- netdata.options.modules[n].module.name = n;
- netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
- }
- catch(e) {
- netdata.options.modules[n].enabled = false;
- netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
- dumpError(e);
- continue;
- }
-
- // load its configuration
- var c = {
- enable_autodetect: netdata.options.modules_enable_autodetect,
- update_every: netdata.options.update_every
- };
-
- var c2 = netdata_read_json_config_file(files[len]);
- extend(true, c, c2);
-
- // call module auto-detection / configuration
- try {
- netdata.modules_configuring++;
- netdata.debug('Configuring module ' + netdata.options.modules[n].name);
- var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
- netdata.debug('Configured module ' + netdata.options.modules[n].name);
- netdata.modules_configuring--;
- });
-
- netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
- }
- catch(e) {
- netdata.modules_configuring--;
- netdata.options.modules[n].enabled = false;
- netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
- dumpError(e);
- continue;
- }
-
- netdata.options.modules[n].loaded = true;
- found++;
- }
- }
- }
-
- // netdata.debug(netdata.options.modules);
- return found;
-}
-
-if(findModules() === 0) {
- netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
- netdata.disableNodePlugin();
- process.exit(1);
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// start
-
-function start_when_configuring_ends() {
- if(netdata.modules_configuring > 0) {
- netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
- setTimeout(start_when_configuring_ends, 500);
- return;
- }
-
- netdata.modules_configuring = 0;
- netdata.start();
-}
-start_when_configuring_ends();
-
-//netdata.debug('netdata object:')
-//netdata.debug(netdata);
diff --git a/collectors/node.d.plugin/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js
index 484597dcb..6b5b754ea 100644
--- a/collectors/node.d.plugin/node_modules/net-snmp.js
+++ b/collectors/node.d.plugin/node_modules/net-snmp.js
@@ -1,66 +1,77 @@
-
// Copyright 2013 Stephen Vickers <stephen.vickers.sv@gmail.com>
// SPDX-License-Identifier: MIT
-var ber = require ("asn1-ber").Ber;
-var dgram = require ("dgram");
-var events = require ("events");
-var util = require ("util");
+var ber = require("asn1-ber").Ber;
+var dgram = require("dgram");
+var events = require("events");
+var util = require("util");
+var crypto = require("crypto");
+
+var DEBUG = false;
+
+var MAX_INT32 = 2147483647;
+
+function debug(line) {
+ if (DEBUG) {
+ console.debug(line);
+ }
+}
/*****************************************************************************
** Constants
**/
-function _expandConstantObject (object) {
- var keys = [];
- for (var key in object)
- keys.push (key);
- for (var i = 0; i < keys.length; i++)
- object[object[keys[i]]] = parseInt (keys[i]);
+
+function _expandConstantObject(object) {
+ var keys = [];
+ for (var key in object)
+ keys.push(key);
+ for (var i = 0; i < keys.length; i++)
+ object[object[keys[i]]] = parseInt(keys[i]);
}
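// Illustration (added): after expansion each map works in both directions,
// e.g. ErrorStatus[0] === "NoError" and ErrorStatus["NoError"] === 0, so the
// library can translate between wire values and symbolic names either way.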
var ErrorStatus = {
- 0: "NoError",
- 1: "TooBig",
- 2: "NoSuchName",
- 3: "BadValue",
- 4: "ReadOnly",
- 5: "GeneralError",
- 6: "NoAccess",
- 7: "WrongType",
- 8: "WrongLength",
- 9: "WrongEncoding",
- 10: "WrongValue",
- 11: "NoCreation",
- 12: "InconsistentValue",
- 13: "ResourceUnavailable",
- 14: "CommitFailed",
- 15: "UndoFailed",
- 16: "AuthorizationError",
- 17: "NotWritable",
- 18: "InconsistentName"
-};
-
-_expandConstantObject (ErrorStatus);
+ 0: "NoError",
+ 1: "TooBig",
+ 2: "NoSuchName",
+ 3: "BadValue",
+ 4: "ReadOnly",
+ 5: "GeneralError",
+ 6: "NoAccess",
+ 7: "WrongType",
+ 8: "WrongLength",
+ 9: "WrongEncoding",
+ 10: "WrongValue",
+ 11: "NoCreation",
+ 12: "InconsistentValue",
+ 13: "ResourceUnavailable",
+ 14: "CommitFailed",
+ 15: "UndoFailed",
+ 16: "AuthorizationError",
+ 17: "NotWritable",
+ 18: "InconsistentName"
+};
+
+_expandConstantObject(ErrorStatus);
var ObjectType = {
- 1: "Boolean",
- 2: "Integer",
- 4: "OctetString",
- 5: "Null",
- 6: "OID",
- 64: "IpAddress",
- 65: "Counter",
- 66: "Gauge",
- 67: "TimeTicks",
- 68: "Opaque",
- 70: "Counter64",
- 128: "NoSuchObject",
- 129: "NoSuchInstance",
- 130: "EndOfMibView"
-};
-
-_expandConstantObject (ObjectType);
+ 1: "Boolean",
+ 2: "Integer",
+ 4: "OctetString",
+ 5: "Null",
+ 6: "OID",
+ 64: "IpAddress",
+ 65: "Counter",
+ 66: "Gauge",
+ 67: "TimeTicks",
+ 68: "Opaque",
+ 70: "Counter64",
+ 128: "NoSuchObject",
+ 129: "NoSuchInstance",
+ 130: "EndOfMibView"
+};
+
+_expandConstantObject(ObjectType);
ObjectType.Integer32 = ObjectType.Integer;
ObjectType.Counter32 = ObjectType.Counter;
@@ -68,132 +79,173 @@ ObjectType.Gauge32 = ObjectType.Gauge;
ObjectType.Unsigned32 = ObjectType.Gauge32;
var PduType = {
- 160: "GetRequest",
- 161: "GetNextRequest",
- 162: "GetResponse",
- 163: "SetRequest",
- 164: "Trap",
- 165: "GetBulkRequest",
- 166: "InformRequest",
- 167: "TrapV2",
- 168: "Report"
+ 160: "GetRequest",
+ 161: "GetNextRequest",
+ 162: "GetResponse",
+ 163: "SetRequest",
+ 164: "Trap",
+ 165: "GetBulkRequest",
+ 166: "InformRequest",
+ 167: "TrapV2",
+ 168: "Report"
};
-_expandConstantObject (PduType);
+_expandConstantObject(PduType);
var TrapType = {
- 0: "ColdStart",
- 1: "WarmStart",
- 2: "LinkDown",
- 3: "LinkUp",
- 4: "AuthenticationFailure",
- 5: "EgpNeighborLoss",
- 6: "EnterpriseSpecific"
+ 0: "ColdStart",
+ 1: "WarmStart",
+ 2: "LinkDown",
+ 3: "LinkUp",
+ 4: "AuthenticationFailure",
+ 5: "EgpNeighborLoss",
+ 6: "EnterpriseSpecific"
+};
+
+_expandConstantObject(TrapType);
+
+var SecurityLevel = {
+ 1: "noAuthNoPriv",
+ 2: "authNoPriv",
+ 3: "authPriv"
+};
+
+_expandConstantObject(SecurityLevel);
+
+var AuthProtocols = {
+ "1": "none",
+ "2": "md5",
+ "3": "sha"
+};
+
+_expandConstantObject(AuthProtocols);
+
+var PrivProtocols = {
+ "1": "none",
+ "2": "des"
+};
+
+_expandConstantObject(PrivProtocols);
+
+var MibProviderType = {
+ "1": "Scalar",
+ "2": "Table"
};
-_expandConstantObject (TrapType);
+_expandConstantObject(MibProviderType);
var Version1 = 0;
var Version2c = 1;
+var Version3 = 3;
+
+var Version = {
+ "1": Version1,
+ "2c": Version2c,
+ "3": Version3
+};
/*****************************************************************************
** Exception class definitions
**/
-function ResponseInvalidError (message) {
- this.name = "ResponseInvalidError";
- this.message = message;
- Error.captureStackTrace(this, ResponseInvalidError);
+function ResponseInvalidError(message) {
+ this.name = "ResponseInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, ResponseInvalidError);
}
-util.inherits (ResponseInvalidError, Error);
-function RequestInvalidError (message) {
- this.name = "RequestInvalidError";
- this.message = message;
- Error.captureStackTrace(this, RequestInvalidError);
+util.inherits(ResponseInvalidError, Error);
+
+function RequestInvalidError(message) {
+ this.name = "RequestInvalidError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestInvalidError);
}
-util.inherits (RequestInvalidError, Error);
-function RequestFailedError (message, status) {
- this.name = "RequestFailedError";
- this.message = message;
- this.status = status;
- Error.captureStackTrace(this, RequestFailedError);
+util.inherits(RequestInvalidError, Error);
+
+function RequestFailedError(message, status) {
+ this.name = "RequestFailedError";
+ this.message = message;
+ this.status = status;
+ Error.captureStackTrace(this, RequestFailedError);
}
-util.inherits (RequestFailedError, Error);
-function RequestTimedOutError (message) {
- this.name = "RequestTimedOutError";
- this.message = message;
- Error.captureStackTrace(this, RequestTimedOutError);
+util.inherits(RequestFailedError, Error);
+
+function RequestTimedOutError(message) {
+ this.name = "RequestTimedOutError";
+ this.message = message;
+ Error.captureStackTrace(this, RequestTimedOutError);
}
-util.inherits (RequestTimedOutError, Error);
+
+util.inherits(RequestTimedOutError, Error);
/*****************************************************************************
** OID and varbind helper functions
**/
-function isVarbindError (varbind) {
- return !!(varbind.type == ObjectType.NoSuchObject
- || varbind.type == ObjectType.NoSuchInstance
- || varbind.type == ObjectType.EndOfMibView);
+function isVarbindError(varbind) {
+ return !!(varbind.type == ObjectType.NoSuchObject
+ || varbind.type == ObjectType.NoSuchInstance
+ || varbind.type == ObjectType.EndOfMibView);
}
-function varbindError (varbind) {
- return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid;
+function varbindError(varbind) {
+ return (ObjectType[varbind.type] || "NotAnError") + ": " + varbind.oid;
}
-function oidFollowsOid (oidString, nextString) {
- var oid = {str: oidString, len: oidString.length, idx: 0};
- var next = {str: nextString, len: nextString.length, idx: 0};
- var dotCharCode = ".".charCodeAt (0);
-
- function getNumber (item) {
- var n = 0;
- if (item.idx >= item.len)
- return null;
- while (item.idx < item.len) {
- var charCode = item.str.charCodeAt (item.idx++);
- if (charCode == dotCharCode)
- return n;
- n = (n ? (n * 10) : n) + (charCode - 48);
- }
- return n;
- }
-
- while (1) {
- var oidNumber = getNumber (oid);
- var nextNumber = getNumber (next);
-
- if (oidNumber !== null) {
- if (nextNumber !== null) {
- if (nextNumber > oidNumber) {
- return true;
- } else if (nextNumber < oidNumber) {
- return false;
- }
- } else {
- return true;
- }
- } else {
- return true;
- }
- }
+function oidFollowsOid(oidString, nextString) {
+ var oid = {str: oidString, len: oidString.length, idx: 0};
+ var next = {str: nextString, len: nextString.length, idx: 0};
+ var dotCharCode = ".".charCodeAt(0);
+
+ function getNumber(item) {
+ var n = 0;
+ if (item.idx >= item.len)
+ return null;
+ while (item.idx < item.len) {
+ var charCode = item.str.charCodeAt(item.idx++);
+ if (charCode == dotCharCode)
+ return n;
+ n = (n ? (n * 10) : n) + (charCode - 48);
+ }
+ return n;
+ }
+
+ while (1) {
+ var oidNumber = getNumber(oid);
+ var nextNumber = getNumber(next);
+
+ if (oidNumber !== null) {
+ if (nextNumber !== null) {
+ if (nextNumber > oidNumber) {
+ return true;
+ } else if (nextNumber < oidNumber) {
+ return false;
+ }
+ } else {
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
}
-function oidInSubtree (oidString, nextString) {
- var oid = oidString.split (".");
- var next = nextString.split (".");
+function oidInSubtree(oidString, nextString) {
+ var oid = oidString.split(".");
+ var next = nextString.split(".");
- if (oid.length > next.length)
- return false;
+ if (oid.length > next.length)
+ return false;
- for (var i = 0; i < oid.length; i++) {
- if (next[i] != oid[i])
- return false;
- }
+ for (var i = 0; i < oid.length; i++) {
+ if (next[i] != oid[i])
+ return false;
+ }
- return true;
+ return true;
}
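// Behaviour sketch (added for clarity):
//   oidFollowsOid("1.3.6.1.2.1.1.1", "1.3.6.1.2.1.1.2")  -> true (sorts after)
//   oidInSubtree("1.3.6.1.2.1.1", "1.3.6.1.2.1.1.5.0")   -> true (prefix match)
// Walk and subtree operations use these to detect when a GetNext response has
// left the requested subtree.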
/**
@@ -207,1228 +259,3138 @@ function oidInSubtree (oidString, nextString) {
** an error since the integer is too large.
**/
-function readInt (buffer) {
- return readUint (buffer, true);
+function readInt(buffer) {
+ return readUint(buffer, true);
+}
+
+function readIpAddress(buffer) {
+ var bytes = buffer.readString(ObjectType.IpAddress, true);
+ if (bytes.length != 4)
+ throw new ResponseInvalidError("Length '" + bytes.length
+ + "' of IP address '" + bytes.toString("hex")
+ + "' is not 4");
+ var value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
+ return value;
}
-function readUint (buffer, isSigned) {
- buffer.readByte ();
- var length = buffer.readByte ();
- var value = 0;
- var signedBitSet = false;
-
- if (length > 5) {
- throw new RangeError ("Integer too long '" + length + "'");
- } else if (length == 5) {
- if (buffer.readByte () !== 0)
- throw new RangeError ("Integer too long '" + length + "'");
- length = 4;
- }
-
- for (var i = 0; i < length; i++) {
- value *= 256;
- value += buffer.readByte ();
-
- if (isSigned && i <= 0) {
- if ((value & 0x80) == 0x80)
- signedBitSet = true;
- }
- }
-
- if (signedBitSet)
- value -= (1 << (i * 8));
-
- return value;
+function readUint(buffer, isSigned) {
+ buffer.readByte();
+ var length = buffer.readByte();
+ var value = 0;
+ var signedBitSet = false;
+
+ if (length > 5) {
+ throw new RangeError("Integer too long '" + length + "'");
+ } else if (length == 5) {
+ if (buffer.readByte() !== 0)
+ throw new RangeError("Integer too long '" + length + "'");
+ length = 4;
+ }
+
+ for (var i = 0; i < length; i++) {
+ value *= 256;
+ value += buffer.readByte();
+
+ if (isSigned && i <= 0) {
+ if ((value & 0x80) == 0x80)
+ signedBitSet = true;
+ }
+ }
+
+ if (signedBitSet)
+ value -= (1 << (i * 8));
+
+ return value;
}
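// Worked example (added for clarity): the BER encoding 02 01 05 is tag 0x02
// (Integer), length 1, value 5, so readInt() returns 5. For 02 01 ff,
// readUint() returns 255 while readInt() sees the sign bit and returns -1.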
-function readUint64 (buffer) {
- var value = buffer.readString (ObjectType.Counter64, true);
+function readUint64(buffer) {
+ var value = buffer.readString(ObjectType.Counter64, true);
- return value;
+ return value;
}
-function readVarbinds (buffer, varbinds) {
- buffer.readSequence ();
-
- while (1) {
- buffer.readSequence ();
- var oid = buffer.readOID ();
- var type = buffer.peek ();
-
- if (type == null)
- break;
-
- var value;
-
- if (type == ObjectType.Boolean) {
- value = buffer.readBoolean ();
- } else if (type == ObjectType.Integer) {
- value = readInt (buffer);
- } else if (type == ObjectType.OctetString) {
- value = buffer.readString (null, true);
- } else if (type == ObjectType.Null) {
- buffer.readByte ();
- buffer.readByte ();
- value = null;
- } else if (type == ObjectType.OID) {
- value = buffer.readOID ();
- } else if (type == ObjectType.IpAddress) {
- var bytes = buffer.readString (ObjectType.IpAddress, true);
- if (bytes.length != 4)
- throw new ResponseInvalidError ("Length '" + bytes.length
- + "' of IP address '" + bytes.toString ("hex")
- + "' is not 4");
- value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
- } else if (type == ObjectType.Counter) {
- value = readUint (buffer);
- } else if (type == ObjectType.Gauge) {
- value = readUint (buffer);
- } else if (type == ObjectType.TimeTicks) {
- value = readUint (buffer);
- } else if (type == ObjectType.Opaque) {
- value = buffer.readString (ObjectType.Opaque, true);
- } else if (type == ObjectType.Counter64) {
- value = readUint64 (buffer);
- } else if (type == ObjectType.NoSuchObject) {
- buffer.readByte ();
- buffer.readByte ();
- value = null;
- } else if (type == ObjectType.NoSuchInstance) {
- buffer.readByte ();
- buffer.readByte ();
- value = null;
- } else if (type == ObjectType.EndOfMibView) {
- buffer.readByte ();
- buffer.readByte ();
- value = null;
- } else {
- throw new ResponseInvalidError ("Unknown type '" + type
- + "' in response");
- }
-
- varbinds.push ({
- oid: oid,
- type: type,
- value: value
- });
- }
+function readVarbinds(buffer, varbinds) {
+ buffer.readSequence();
+
+ while (1) {
+ buffer.readSequence();
+ if (buffer.peek() != ObjectType.OID)
+ break;
+ var oid = buffer.readOID();
+ var type = buffer.peek();
+
+ if (type == null)
+ break;
+
+ var value;
+
+ if (type == ObjectType.Boolean) {
+ value = buffer.readBoolean();
+ } else if (type == ObjectType.Integer) {
+ value = readInt(buffer);
+ } else if (type == ObjectType.OctetString) {
+ value = buffer.readString(null, true);
+ } else if (type == ObjectType.Null) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.OID) {
+ value = buffer.readOID();
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = buffer.readString(ObjectType.IpAddress, true);
+ if (bytes.length != 4)
+ throw new ResponseInvalidError("Length '" + bytes.length
+ + "' of IP address '" + bytes.toString("hex")
+ + "' is not 4");
+ value = bytes[0] + "." + bytes[1] + "." + bytes[2] + "." + bytes[3];
+ } else if (type == ObjectType.Counter) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.Gauge) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.TimeTicks) {
+ value = readUint(buffer);
+ } else if (type == ObjectType.Opaque) {
+ value = buffer.readString(ObjectType.Opaque, true);
+ } else if (type == ObjectType.Counter64) {
+ value = readUint64(buffer);
+ } else if (type == ObjectType.NoSuchObject) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.NoSuchInstance) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else if (type == ObjectType.EndOfMibView) {
+ buffer.readByte();
+ buffer.readByte();
+ value = null;
+ } else {
+ throw new ResponseInvalidError("Unknown type '" + type
+ + "' in response");
+ }
+
+ varbinds.push({
+ oid: oid,
+ type: type,
+ value: value
+ });
+ }
}
-function writeUint (buffer, type, value) {
- var b = new Buffer (4);
- b.writeUInt32BE (value, 0);
- buffer.writeBuffer (b, type);
+function writeUint(buffer, type, value) {
+ var b = Buffer.alloc(4);
+ b.writeUInt32BE(value, 0);
+ buffer.writeBuffer(b, type);
}
-function writeUint64 (buffer, value) {
- buffer.writeBuffer (value, ObjectType.Counter64);
+function writeUint64(buffer, value) {
+ buffer.writeBuffer(value, ObjectType.Counter64);
}
-function writeVarbinds (buffer, varbinds) {
- buffer.startSequence ();
- for (var i = 0; i < varbinds.length; i++) {
- buffer.startSequence ();
- buffer.writeOID (varbinds[i].oid);
-
- if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) {
- var type = varbinds[i].type;
- var value = varbinds[i].value;
-
- if (type == ObjectType.Boolean) {
- buffer.writeBoolean (value ? true : false);
- } else if (type == ObjectType.Integer) { // also Integer32
- buffer.writeInt (value);
- } else if (type == ObjectType.OctetString) {
- if (typeof value == "string")
- buffer.writeString (value);
- else
- buffer.writeBuffer (value, ObjectType.OctetString);
- } else if (type == ObjectType.Null) {
- buffer.writeNull ();
- } else if (type == ObjectType.OID) {
- buffer.writeOID (value);
- } else if (type == ObjectType.IpAddress) {
- var bytes = value.split (".");
- if (bytes.length != 4)
- throw new RequestInvalidError ("Invalid IP address '"
- + value + "'");
- buffer.writeBuffer (new Buffer (bytes), 64);
- } else if (type == ObjectType.Counter) { // also Counter32
- writeUint (buffer, ObjectType.Counter, value);
- } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32
- writeUint (buffer, ObjectType.Gauge, value);
- } else if (type == ObjectType.TimeTicks) {
- writeUint (buffer, ObjectType.TimeTicks, value);
- } else if (type == ObjectType.Opaque) {
- buffer.writeBuffer (value, ObjectType.Opaque);
- } else if (type == ObjectType.Counter64) {
- writeUint64 (buffer, value);
- } else {
- throw new RequestInvalidError ("Unknown type '" + type
- + "' in request");
- }
- } else {
- buffer.writeNull ();
- }
-
- buffer.endSequence ();
- }
- buffer.endSequence ();
+function writeVarbinds(buffer, varbinds) {
+ buffer.startSequence();
+ for (var i = 0; i < varbinds.length; i++) {
+ buffer.startSequence();
+ buffer.writeOID(varbinds[i].oid);
+
+ if (varbinds[i].type && varbinds[i].hasOwnProperty("value")) {
+ var type = varbinds[i].type;
+ var value = varbinds[i].value;
+
+ if (type == ObjectType.Boolean) {
+ buffer.writeBoolean(value ? true : false);
+ } else if (type == ObjectType.Integer) { // also Integer32
+ buffer.writeInt(value);
+ } else if (type == ObjectType.OctetString) {
+ if (typeof value == "string")
+ buffer.writeString(value);
+ else
+ buffer.writeBuffer(value, ObjectType.OctetString);
+ } else if (type == ObjectType.Null) {
+ buffer.writeNull();
+ } else if (type == ObjectType.OID) {
+ buffer.writeOID(value);
+ } else if (type == ObjectType.IpAddress) {
+ var bytes = value.split(".");
+ if (bytes.length != 4)
+ throw new RequestInvalidError("Invalid IP address '"
+ + value + "'");
+ buffer.writeBuffer(Buffer.from(bytes), 64);
+ } else if (type == ObjectType.Counter) { // also Counter32
+ writeUint(buffer, ObjectType.Counter, value);
+ } else if (type == ObjectType.Gauge) { // also Gauge32 & Unsigned32
+ writeUint(buffer, ObjectType.Gauge, value);
+ } else if (type == ObjectType.TimeTicks) {
+ writeUint(buffer, ObjectType.TimeTicks, value);
+ } else if (type == ObjectType.Opaque) {
+ buffer.writeBuffer(value, ObjectType.Opaque);
+ } else if (type == ObjectType.Counter64) {
+ writeUint64(buffer, value);
+ } else if (type == ObjectType.EndOfMibView) {
+ buffer.writeByte(130);
+ buffer.writeByte(0);
+ } else {
+ throw new RequestInvalidError("Unknown type '" + type
+ + "' in request");
+ }
+ } else {
+ buffer.writeNull();
+ }
+
+ buffer.endSequence();
+ }
+ buffer.endSequence();
}
/*****************************************************************************
** PDU class definitions
**/
-var SimplePdu = function (id, varbinds, options) {
- this.id = id;
- this.varbinds = varbinds;
- this.options = options || {};
+var SimplePdu = function () {
};
SimplePdu.prototype.toBuffer = function (buffer) {
- buffer.startSequence (this.type);
+ buffer.startSequence(this.type);
- buffer.writeInt (this.id);
- buffer.writeInt ((this.type == PduType.GetBulkRequest)
- ? (this.options.nonRepeaters || 0)
- : 0);
- buffer.writeInt ((this.type == PduType.GetBulkRequest)
- ? (this.options.maxRepetitions || 0)
- : 0);
+ buffer.writeInt(this.id);
+ buffer.writeInt((this.type == PduType.GetBulkRequest)
+ ? (this.options.nonRepeaters || 0)
+ : 0);
+ buffer.writeInt((this.type == PduType.GetBulkRequest)
+ ? (this.options.maxRepetitions || 0)
+ : 0);
- writeVarbinds (buffer, this.varbinds);
+ writeVarbinds(buffer, this.varbinds);
- buffer.endSequence ();
+ buffer.endSequence();
};
-var GetBulkRequestPdu = function () {
- this.type = PduType.GetBulkRequest;
- GetBulkRequestPdu.super_.apply (this, arguments);
-};
+SimplePdu.prototype.initializeFromVariables = function (id, varbinds, options) {
+ this.id = id;
+ this.varbinds = varbinds;
+ this.options = options || {};
+ this.contextName = (options && options.context) ? options.context : "";
+};
-util.inherits (GetBulkRequestPdu, SimplePdu);
+SimplePdu.prototype.initializeFromBuffer = function (reader) {
+ this.type = reader.peek();
+ reader.readSequence();
+
+ this.id = reader.readInt();
+ this.nonRepeaters = reader.readInt();
+ this.maxRepetitions = reader.readInt();
+
+ this.varbinds = [];
+ readVarbinds(reader, this.varbinds);
-var GetNextRequestPdu = function () {
- this.type = PduType.GetNextRequest;
- GetNextRequestPdu.super_.apply (this, arguments);
};
-util.inherits (GetNextRequestPdu, SimplePdu);
+SimplePdu.prototype.getResponsePduForRequest = function () {
+ var responsePdu = GetResponsePdu.createFromVariables(this.id, [], {});
+ if (this.contextEngineID) {
+ responsePdu.contextEngineID = this.contextEngineID;
+ responsePdu.contextName = this.contextName;
+ }
+ return responsePdu;
+};
+
+SimplePdu.createFromVariables = function (pduClass, id, varbinds, options) {
+ var pdu = new pduClass(id, varbinds, options);
+ pdu.id = id;
+ pdu.varbinds = varbinds;
+ pdu.options = options || {};
+ pdu.contextName = (options && options.context) ? options.context : "";
+ return pdu;
+};
-var GetResponsePdu = function (buffer) {
- this.type = PduType.GetResponse;
+var GetBulkRequestPdu = function () {
+ this.type = PduType.GetBulkRequest;
+ GetBulkRequestPdu.super_.apply(this, arguments);
+};
- buffer.readSequence (this.type);
+util.inherits(GetBulkRequestPdu, SimplePdu);
- this.id = buffer.readInt ();
+GetBulkRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetBulkRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
- this.errorStatus = buffer.readInt ();
- this.errorIndex = buffer.readInt ();
+var GetNextRequestPdu = function () {
+ this.type = PduType.GetNextRequest;
+ GetNextRequestPdu.super_.apply(this, arguments);
+};
- this.varbinds = [];
+util.inherits(GetNextRequestPdu, SimplePdu);
- readVarbinds (buffer, this.varbinds);
+GetNextRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetNextRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
};
var GetRequestPdu = function () {
- this.type = PduType.GetRequest;
- GetRequestPdu.super_.apply (this, arguments);
+ this.type = PduType.GetRequest;
+ GetRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetRequestPdu, SimplePdu);
+
+GetRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new GetRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
};
-util.inherits (GetRequestPdu, SimplePdu);
+GetRequestPdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new GetRequestPdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
var InformRequestPdu = function () {
- this.type = PduType.InformRequest;
- InformRequestPdu.super_.apply (this, arguments);
+ this.type = PduType.InformRequest;
+ InformRequestPdu.super_.apply(this, arguments);
};
-util.inherits (InformRequestPdu, SimplePdu);
+util.inherits(InformRequestPdu, SimplePdu);
+
+InformRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new InformRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
var SetRequestPdu = function () {
- this.type = PduType.SetRequest;
- SetRequestPdu.super_.apply (this, arguments);
+ this.type = PduType.SetRequest;
+ SetRequestPdu.super_.apply(this, arguments);
+};
+
+util.inherits(SetRequestPdu, SimplePdu);
+
+SetRequestPdu.createFromBuffer = function (reader) {
+ var pdu = new SetRequestPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
};
-util.inherits (SetRequestPdu, SimplePdu);
+var TrapPdu = function () {
+ this.type = PduType.Trap;
+};
-var TrapPdu = function (typeOrOid, varbinds, options) {
- this.type = PduType.Trap;
+TrapPdu.prototype.toBuffer = function (buffer) {
+ buffer.startSequence(this.type);
- this.agentAddr = options.agentAddr || "127.0.0.1";
- this.upTime = options.upTime;
+ buffer.writeOID(this.enterprise);
+ buffer.writeBuffer(Buffer.from(this.agentAddr.split(".")),
+ ObjectType.IpAddress);
+ buffer.writeInt(this.generic);
+ buffer.writeInt(this.specific);
+ writeUint(buffer, ObjectType.TimeTicks,
+ this.upTime || Math.floor(process.uptime() * 100));
- if (typeof typeOrOid == "string") {
- this.generic = TrapType.EnterpriseSpecific;
- this.specific = parseInt (typeOrOid.match (/\.(\d+)$/)[1]);
- this.enterprise = typeOrOid.replace (/\.(\d+)$/, "");
- } else {
- this.generic = typeOrOid;
- this.specific = 0;
- this.enterprise = "1.3.6.1.4.1";
- }
+ writeVarbinds(buffer, this.varbinds);
- this.varbinds = varbinds;
+ buffer.endSequence();
};
-TrapPdu.prototype.toBuffer = function (buffer) {
- buffer.startSequence (this.type);
+TrapPdu.createFromBuffer = function (reader) {
+ var pdu = new TrapPdu();
+ reader.readSequence();
+
+ pdu.enterprise = reader.readOID();
+ pdu.agentAddr = readIpAddress(reader);
+ pdu.generic = reader.readInt();
+ pdu.specific = reader.readInt();
+ pdu.upTime = readUint(reader);
+
+ pdu.varbinds = [];
+ readVarbinds(reader, pdu.varbinds);
+
+ return pdu;
+};
- buffer.writeOID (this.enterprise);
- buffer.writeBuffer (new Buffer (this.agentAddr.split (".")),
- ObjectType.IpAddress);
- buffer.writeInt (this.generic);
- buffer.writeInt (this.specific);
- writeUint (buffer, ObjectType.TimeTicks,
- this.upTime || Math.floor (process.uptime () * 100));
+TrapPdu.createFromVariables = function (typeOrOid, varbinds, options) {
+ var pdu = new TrapPdu();
+ pdu.agentAddr = options.agentAddr || "127.0.0.1";
+ pdu.upTime = options.upTime;
- writeVarbinds (buffer, this.varbinds);
+ if (typeof typeOrOid == "string") {
+ pdu.generic = TrapType.EnterpriseSpecific;
+ pdu.specific = parseInt(typeOrOid.match(/\.(\d+)$/)[1]);
+ pdu.enterprise = typeOrOid.replace(/\.(\d+)$/, "");
+ } else {
+ pdu.generic = typeOrOid;
+ pdu.specific = 0;
+ pdu.enterprise = "1.3.6.1.4.1";
+ }
- buffer.endSequence ();
+ pdu.varbinds = varbinds;
+
+ return pdu;
};
var TrapV2Pdu = function () {
- this.type = PduType.TrapV2;
- TrapV2Pdu.super_.apply (this, arguments);
+ this.type = PduType.TrapV2;
+ TrapV2Pdu.super_.apply(this, arguments);
+};
+
+util.inherits(TrapV2Pdu, SimplePdu);
+
+TrapV2Pdu.createFromBuffer = function (reader) {
+ var pdu = new TrapV2Pdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+TrapV2Pdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new TrapV2Pdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var SimpleResponsePdu = function () {
+};
+
+SimpleResponsePdu.prototype.toBuffer = function (writer) {
+ writer.startSequence(this.type);
+
+ writer.writeInt(this.id);
+ writer.writeInt(this.errorStatus || 0);
+ writer.writeInt(this.errorIndex || 0);
+ writeVarbinds(writer, this.varbinds);
+ writer.endSequence();
+
};
-util.inherits (TrapV2Pdu, SimplePdu);
+SimpleResponsePdu.prototype.initializeFromBuffer = function (reader) {
+ reader.readSequence(this.type);
+
+ this.id = reader.readInt();
+ this.errorStatus = reader.readInt();
+ this.errorIndex = reader.readInt();
+
+ this.varbinds = [];
+ readVarbinds(reader, this.varbinds);
+};
+
+SimpleResponsePdu.prototype.initializeFromVariables = function (id, varbinds, options) {
+ this.id = id;
+ this.varbinds = varbinds;
+ this.options = options || {};
+};
+
+var GetResponsePdu = function () {
+ this.type = PduType.GetResponse;
+ GetResponsePdu.super_.apply(this, arguments);
+};
+
+util.inherits(GetResponsePdu, SimpleResponsePdu);
+
+GetResponsePdu.createFromBuffer = function (reader) {
+ var pdu = new GetResponsePdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+GetResponsePdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new GetResponsePdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var ReportPdu = function () {
+ this.type = PduType.Report;
+ ReportPdu.super_.apply(this, arguments);
+};
+
+util.inherits(ReportPdu, SimpleResponsePdu);
+
+ReportPdu.createFromBuffer = function (reader) {
+ var pdu = new ReportPdu();
+ pdu.initializeFromBuffer(reader);
+ return pdu;
+};
+
+ReportPdu.createFromVariables = function (id, varbinds, options) {
+ var pdu = new ReportPdu();
+ pdu.initializeFromVariables(id, varbinds, options);
+ return pdu;
+};
+
+var readPdu = function (reader, scoped) {
+ var pdu;
+ var contextEngineID;
+ var contextName;
+ if (scoped) {
+ reader.readSequence();
+ contextEngineID = reader.readString(ber.OctetString, true);
+ contextName = reader.readString();
+ }
+ var type = reader.peek();
+
+ if (type == PduType.GetResponse) {
+ pdu = GetResponsePdu.createFromBuffer(reader);
+ } else if (type == PduType.Report) {
+ pdu = ReportPdu.createFromBuffer(reader);
+ } else if (type == PduType.Trap) {
+ pdu = TrapPdu.createFromBuffer(reader);
+ } else if (type == PduType.TrapV2) {
+ pdu = TrapV2Pdu.createFromBuffer(reader);
+ } else if (type == PduType.InformRequest) {
+ pdu = InformRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetRequest) {
+ pdu = GetRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.SetRequest) {
+ pdu = SetRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetNextRequest) {
+ pdu = GetNextRequestPdu.createFromBuffer(reader);
+ } else if (type == PduType.GetBulkRequest) {
+ pdu = GetBulkRequestPdu.createFromBuffer(reader);
+ } else {
+ throw new ResponseInvalidError("Unknown PDU type '" + type
+ + "' in response");
+ }
+ if (scoped) {
+ pdu.contextEngineID = contextEngineID;
+ pdu.contextName = contextName;
+ }
+ pdu.scoped = scoped;
+ return pdu;
+};
+
+var createDiscoveryPdu = function (context) {
+ return GetRequestPdu.createFromVariables(_generateId(), [], {context: context});
+};
+
+var Authentication = {};
+
+Authentication.HMAC_BUFFER_SIZE = 1024 * 1024;
+Authentication.HMAC_BLOCK_SIZE = 64;
+Authentication.AUTHENTICATION_CODE_LENGTH = 12;
+Authentication.AUTH_PARAMETERS_PLACEHOLDER = Buffer.from('8182838485868788898a8b8c', 'hex');
+
+Authentication.algorithms = {};
+
+Authentication.algorithms[AuthProtocols.md5] = {
+ // KEY_LENGTH: 16,
+ CRYPTO_ALGORITHM: 'md5'
+};
+
+Authentication.algorithms[AuthProtocols.sha] = {
+ // KEY_LENGTH: 20,
+ CRYPTO_ALGORITHM: 'sha1'
+};
+
+// Adapted from RFC3414 Appendix A.2.1. Password to Key Sample Code for MD5
+Authentication.passwordToKey = function (authProtocol, authPasswordString, engineID) {
+ var hashAlgorithm;
+ var firstDigest;
+ var finalDigest;
+ var buf = Buffer.alloc(Authentication.HMAC_BUFFER_SIZE);
+ var bufOffset = 0;
+ var passwordIndex = 0;
+ var count = 0;
+ var password = Buffer.from(authPasswordString);
+ var cryptoAlgorithm = Authentication.algorithms[authProtocol].CRYPTO_ALGORITHM;
+
+ while (count < Authentication.HMAC_BUFFER_SIZE) {
+ for (var i = 0; i < Authentication.HMAC_BLOCK_SIZE; i++) {
+ buf.writeUInt8(password[passwordIndex++ % password.length], bufOffset++);
+ }
+ count += Authentication.HMAC_BLOCK_SIZE;
+ }
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(buf);
+ firstDigest = hashAlgorithm.digest();
+ // debug ("First digest: " + firstDigest.toString('hex'));
+
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(firstDigest);
+ hashAlgorithm.update(engineID);
+ hashAlgorithm.update(firstDigest);
+ finalDigest = hashAlgorithm.digest();
+ debug("Localized key: " + finalDigest.toString('hex'));
+
+ return finalDigest;
+};
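// Sanity check (RFC 3414 Appendix A.3.1 test vector, added for reference):
// passwordToKey(AuthProtocols.md5, "maplesyrup",
// Buffer.from("000000000000000000000002", "hex")) should produce the
// localized key 526f5eed9fcce26f8964c2930787d82b.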
+
+Authentication.addParametersToMessageBuffer = function (messageBuffer, authProtocol, authPassword, engineID) {
+ var authenticationParametersOffset;
+ var digestToAdd;
+
+ // clear the authenticationParameters field in message
+ authenticationParametersOffset = messageBuffer.indexOf(Authentication.AUTH_PARAMETERS_PLACEHOLDER);
+ messageBuffer.fill(0, authenticationParametersOffset, authenticationParametersOffset + Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ digestToAdd = Authentication.calculateDigest(messageBuffer, authProtocol, authPassword, engineID);
+ digestToAdd.copy(messageBuffer, authenticationParametersOffset, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+ debug("Added Auth Parameters: " + digestToAdd.toString('hex'));
+};
+
+Authentication.isAuthentic = function (messageBuffer, authProtocol, authPassword, engineID, digestInMessage) {
+ var authenticationParametersOffset;
+ var calculatedDigest;
+
+ // clear the authenticationParameters field in message
+ authenticationParametersOffset = messageBuffer.indexOf(digestInMessage);
+ messageBuffer.fill(0, authenticationParametersOffset, authenticationParametersOffset + Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ calculatedDigest = Authentication.calculateDigest(messageBuffer, authProtocol, authPassword, engineID);
+
+ // replace previously cleared authenticationParameters field in message
+ digestInMessage.copy(messageBuffer, authenticationParametersOffset, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+
+ debug("Digest in message: " + digestInMessage.toString('hex'));
+ debug("Calculated digest: " + calculatedDigest.toString('hex'));
+ return calculatedDigest.equals(digestInMessage, Authentication.AUTHENTICATION_CODE_LENGTH);
+};
+
+Authentication.calculateDigest = function (messageBuffer, authProtocol, authPassword, engineID) {
+ var authKey = Authentication.passwordToKey(authProtocol, authPassword, engineID);
+
+ // Adapted from RFC 3414 Section 6.3.1. Processing an Outgoing Message
+ var hashAlgorithm;
+ var kIpad;
+ var kOpad;
+ var firstDigest;
+ var finalDigest;
+ var truncatedDigest;
+ var i;
+ var cryptoAlgorithm = Authentication.algorithms[authProtocol].CRYPTO_ALGORITHM;
+
+ if (authKey.length > Authentication.HMAC_BLOCK_SIZE) {
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(authKey);
+ authKey = hashAlgorithm.digest();
+ }
+
+ // MD(K XOR opad, MD(K XOR ipad, msg))
+ kIpad = Buffer.alloc(Authentication.HMAC_BLOCK_SIZE);
+ kOpad = Buffer.alloc(Authentication.HMAC_BLOCK_SIZE);
+ for (i = 0; i < authKey.length; i++) {
+ kIpad[i] = authKey[i] ^ 0x36;
+ kOpad[i] = authKey[i] ^ 0x5c;
+ }
+ kIpad.fill(0x36, authKey.length);
+ kOpad.fill(0x5c, authKey.length);
+
+ // inner MD
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(kIpad);
+ hashAlgorithm.update(messageBuffer);
+ firstDigest = hashAlgorithm.digest();
+ // outer MD
+ hashAlgorithm = crypto.createHash(cryptoAlgorithm);
+ hashAlgorithm.update(kOpad);
+ hashAlgorithm.update(firstDigest);
+ finalDigest = hashAlgorithm.digest();
+
+ truncatedDigest = Buffer.alloc(Authentication.AUTHENTICATION_CODE_LENGTH);
+ finalDigest.copy(truncatedDigest, 0, 0, Authentication.AUTHENTICATION_CODE_LENGTH);
+ return truncatedDigest;
+};
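// Equivalence note (added; not in the original source): for localized keys,
// which are never longer than HMAC_BLOCK_SIZE, the kIpad/kOpad construction
// above is exactly HMAC truncated to 12 octets (HMAC-MD5-96 / HMAC-SHA-96):
//
//   var hmac = crypto.createHmac(cryptoAlgorithm, authKey);
//   hmac.update(messageBuffer);
//   var truncatedDigest = hmac.digest().slice(0, Authentication.AUTHENTICATION_CODE_LENGTH);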
+
+var Encryption = {};
+
+Encryption.INPUT_KEY_LENGTH = 16;
+Encryption.DES_KEY_LENGTH = 8;
+Encryption.DES_BLOCK_LENGTH = 8;
+Encryption.CRYPTO_DES_ALGORITHM = 'des-cbc';
+Encryption.PRIV_PARAMETERS_PLACEHOLDER = Buffer.from('9192939495969798', 'hex');
+
+Encryption.encryptPdu = function (scopedPdu, privProtocol, privPassword, authProtocol, engineID) {
+ var privLocalizedKey;
+ var encryptionKey;
+ var preIv;
+ var salt;
+ var iv;
+ var i;
+ var paddedScopedPduLength;
+ var paddedScopedPdu;
+ var encryptedPdu;
+ var cbcProtocol = Encryption.CRYPTO_DES_ALGORITHM;
+
+ privLocalizedKey = Authentication.passwordToKey(authProtocol, privPassword, engineID);
+ encryptionKey = Buffer.alloc(Encryption.DES_KEY_LENGTH);
+ privLocalizedKey.copy(encryptionKey, 0, 0, Encryption.DES_KEY_LENGTH);
+ preIv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ privLocalizedKey.copy(preIv, 0, Encryption.DES_KEY_LENGTH, Encryption.DES_KEY_LENGTH + Encryption.DES_BLOCK_LENGTH);
+
+ salt = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ // set local SNMP engine boots part of salt to 1, as we have no persistent engine state
+ salt.fill('00000001', 0, 4, 'hex');
+ // set local integer part of salt to random
+ salt.fill(crypto.randomBytes(4), 4, 8);
+ iv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ for (i = 0; i < iv.length; i++) {
+ iv[i] = preIv[i] ^ salt[i];
+ }
+
+ if (scopedPdu.length % Encryption.DES_BLOCK_LENGTH == 0) {
+ paddedScopedPdu = scopedPdu;
+ } else {
+ paddedScopedPduLength = Encryption.DES_BLOCK_LENGTH * (Math.floor(scopedPdu.length / Encryption.DES_BLOCK_LENGTH) + 1);
+ paddedScopedPdu = Buffer.alloc(paddedScopedPduLength);
+ scopedPdu.copy(paddedScopedPdu, 0, 0, scopedPdu.length);
+ }
+ var cipher = crypto.createCipheriv(cbcProtocol, encryptionKey, iv);
+ encryptedPdu = cipher.update(paddedScopedPdu);
+ encryptedPdu = Buffer.concat([encryptedPdu, cipher.final()]);
+ debug("Key: " + encryptionKey.toString('hex'));
+ debug("IV: " + iv.toString('hex'));
+ debug("Plain: " + paddedScopedPdu.toString('hex'));
+ debug("Encrypted: " + encryptedPdu.toString('hex'));
+
+ return {
+ encryptedPdu: encryptedPdu,
+ msgPrivacyParameters: salt
+ };
+};
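// Decryption (below) mirrors this procedure: the receiver re-derives preIv
// from the localized privacy key, XORs it with the salt carried in
// msgPrivacyParameters to recover the IV, and runs DES-CBC in decrypt mode
// (RFC 3414, Section 8.1.1.1).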
+
+Encryption.decryptPdu = function (encryptedPdu, privProtocol, privParameters, privPassword, authProtocol, engineID, forceAutoPaddingDisable) {
+ var privLocalizedKey;
+ var decryptionKey;
+ var preIv;
+ var salt;
+ var iv;
+ var i;
+ var decryptedPdu;
+ var cbcProtocol = Encryption.CRYPTO_DES_ALGORITHM;
+
+ privLocalizedKey = Authentication.passwordToKey(authProtocol, privPassword, engineID);
+ decryptionKey = Buffer.alloc(Encryption.DES_KEY_LENGTH);
+ privLocalizedKey.copy(decryptionKey, 0, 0, Encryption.DES_KEY_LENGTH);
+ preIv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ privLocalizedKey.copy(preIv, 0, Encryption.DES_KEY_LENGTH, Encryption.DES_KEY_LENGTH + Encryption.DES_BLOCK_LENGTH);
+
+ salt = privParameters;
+ iv = Buffer.alloc(Encryption.DES_BLOCK_LENGTH);
+ for (i = 0; i < iv.length; i++) {
+ iv[i] = preIv[i] ^ salt[i];
+ }
+
+ var decipher = crypto.createDecipheriv(cbcProtocol, decryptionKey, iv);
+ if (forceAutoPaddingDisable) {
+ decipher.setAutoPadding(false);
+ }
+ decryptedPdu = decipher.update(encryptedPdu);
+ // This try-catch works around a seemingly incorrect error condition:
+ // decipher.final() sometimes throws a decrypt error. It replaces this
+ // line, which should have been sufficient on its own:
+ // decryptedPdu = Buffer.concat ([decryptedPdu, decipher.final()]);
+ try {
+ decryptedPdu = Buffer.concat([decryptedPdu, decipher.final()]);
+ } catch (error) {
+ // debug("Decrypt error: " + error);
+ decipher = crypto.createDecipheriv(cbcProtocol, decryptionKey, iv);
+ decipher.setAutoPadding(false);
+ decryptedPdu = decipher.update(encryptedPdu);
+ decryptedPdu = Buffer.concat([decryptedPdu, decipher.final()]);
+ }
+ debug("Key: " + decryptionKey.toString('hex'));
+ debug("IV: " + iv.toString('hex'));
+ debug("Encrypted: " + encryptedPdu.toString('hex'));
+ debug("Plain: " + decryptedPdu.toString('hex'));
+
+ return decryptedPdu;
+
+};
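+
+// Round-trip sketch: a PDU encrypted by encryptPdu() decrypts back to the
+// original plaintext (zero-padded to a multiple of 8 bytes) when the same
+// key material is supplied; variables are as in the encryptPdu() sketch:
+//
+//   var enc = Encryption.encryptPdu(scopedPduBuffer, privProtocol,
+//       privPassword, authProtocol, engineID);
+//   var plain = Encryption.decryptPdu(enc.encryptedPdu, privProtocol,
+//       enc.msgPrivacyParameters, privPassword, authProtocol, engineID);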
+
+Encryption.addParametersToMessageBuffer = function (messageBuffer, msgPrivacyParameters) {
+ var privacyParametersOffset = messageBuffer.indexOf(Encryption.PRIV_PARAMETERS_PLACEHOLDER);
+ msgPrivacyParameters.copy(messageBuffer, privacyParametersOffset, 0, Encryption.DES_BLOCK_LENGTH);
+};
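+
+// The placeholder-patching technique in isolation (sketch): the message is
+// serialized with a fixed 8-byte marker, then the real salt is copied over
+// the marker once the final buffer exists. header and body are hypothetical
+// buffers standing in for the serialized message parts.
+//
+//   var msg = Buffer.concat([header, Encryption.PRIV_PARAMETERS_PLACEHOLDER, body]);
+//   var salt = crypto.randomBytes(8);
+//   salt.copy(msg, msg.indexOf(Encryption.PRIV_PARAMETERS_PLACEHOLDER));
+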
/*****************************************************************************
- ** Message class definitions
+ ** Message class definition
**/
-var RequestMessage = function (version, community, pdu) {
- this.version = version;
- this.community = community;
- this.pdu = pdu;
+var Message = function () {
+};
+
+Message.prototype.getReqId = function () {
+ return this.version == Version3 ? this.msgGlobalData.msgID : this.pdu.id;
+};
+
+Message.prototype.toBuffer = function () {
+ if (this.version == Version3) {
+ return this.toBufferV3();
+ } else {
+ return this.toBufferCommunity();
+ }
+};
+
+Message.prototype.toBufferCommunity = function () {
+ if (this.buffer)
+ return this.buffer;
+
+ var writer = new ber.Writer();
+
+ writer.startSequence();
+
+ writer.writeInt(this.version);
+ writer.writeString(this.community);
+
+ this.pdu.toBuffer(writer);
+
+ writer.endSequence();
+
+ this.buffer = writer.buffer;
+
+ return this.buffer;
+};
+
+Message.prototype.toBufferV3 = function () {
+ var encryptionResult;
+
+ if (this.buffer)
+ return this.buffer;
+
+ var writer = new ber.Writer();
+
+ writer.startSequence();
+
+ writer.writeInt(this.version);
+
+ // HeaderData
+ writer.startSequence();
+ writer.writeInt(this.msgGlobalData.msgID);
+ writer.writeInt(this.msgGlobalData.msgMaxSize);
+ writer.writeByte(ber.OctetString);
+ writer.writeByte(1);
+ writer.writeByte(this.msgGlobalData.msgFlags);
+ writer.writeInt(this.msgGlobalData.msgSecurityModel);
+ writer.endSequence();
+
+ // msgSecurityParameters
+ var msgSecurityParametersWriter = new ber.Writer();
+ msgSecurityParametersWriter.startSequence();
+ //msgSecurityParametersWriter.writeString (this.msgSecurityParameters.msgAuthoritativeEngineID);
+ // writing a zero-length buffer fails - should fix asn1-ber for this condition
+ if (this.msgSecurityParameters.msgAuthoritativeEngineID.length == 0) {
+ msgSecurityParametersWriter.writeString("");
+ } else {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgAuthoritativeEngineID, ber.OctetString);
+ }
+ msgSecurityParametersWriter.writeInt(this.msgSecurityParameters.msgAuthoritativeEngineBoots);
+ msgSecurityParametersWriter.writeInt(this.msgSecurityParameters.msgAuthoritativeEngineTime);
+ msgSecurityParametersWriter.writeString(this.msgSecurityParameters.msgUserName);
+
+ if (this.hasAuthentication()) {
+ msgSecurityParametersWriter.writeBuffer(Authentication.AUTH_PARAMETERS_PLACEHOLDER, ber.OctetString);
+ // msgFlags indicates no authentication, yet authentication parameters are present - should never happen, but write them anyway
+ } else if (this.msgSecurityParameters.msgAuthenticationParameters.length > 0) {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgAuthenticationParameters, ber.OctetString);
+ } else {
+ msgSecurityParametersWriter.writeString("");
+ }
+
+ if (this.hasPrivacy()) {
+ msgSecurityParametersWriter.writeBuffer(Encryption.PRIV_PARAMETERS_PLACEHOLDER, ber.OctetString);
+ // msgFlags indicates no privacy, yet privacy parameters are present - should never happen, but write them anyway
+ } else if (this.msgSecurityParameters.msgPrivacyParameters.length > 0) {
+ msgSecurityParametersWriter.writeBuffer(this.msgSecurityParameters.msgPrivacyParameters, ber.OctetString);
+ } else {
+ msgSecurityParametersWriter.writeString("");
+ }
+ msgSecurityParametersWriter.endSequence();
+
+ writer.writeBuffer(msgSecurityParametersWriter.buffer, ber.OctetString);
+
+ // ScopedPDU
+ var scopedPduWriter = new ber.Writer();
+ scopedPduWriter.startSequence();
+ var contextEngineID = this.pdu.contextEngineID ? this.pdu.contextEngineID : this.msgSecurityParameters.msgAuthoritativeEngineID;
+ if (contextEngineID.length == 0) {
+ scopedPduWriter.writeString("");
+ } else {
+ scopedPduWriter.writeBuffer(contextEngineID, ber.OctetString);
+ }
+ scopedPduWriter.writeString(this.pdu.contextName);
+ this.pdu.toBuffer(scopedPduWriter);
+ scopedPduWriter.endSequence();
+
+ if (this.hasPrivacy()) {
+ encryptionResult = Encryption.encryptPdu(scopedPduWriter.buffer, this.user.privProtocol, this.user.privKey, this.user.authProtocol, this.msgSecurityParameters.msgAuthoritativeEngineID);
+ writer.writeBuffer(encryptionResult.encryptedPdu, ber.OctetString);
+ } else {
+ writer.writeBuffer(scopedPduWriter.buffer);
+ }
+
+ writer.endSequence();
+
+ this.buffer = writer.buffer;
+
+ if (this.hasPrivacy()) {
+ Encryption.addParametersToMessageBuffer(this.buffer, encryptionResult.msgPrivacyParameters);
+ }
+
+ if (this.hasAuthentication()) {
+ Authentication.addParametersToMessageBuffer(this.buffer, this.user.authProtocol, this.user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID);
+ }
+
+ return this.buffer;
+};
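+
+// Outline of the SNMPv3 message produced by toBufferV3():
+//
+//   SEQUENCE {
+//     msgVersion             INTEGER (3)
+//     msgGlobalData          SEQUENCE { msgID, msgMaxSize, msgFlags, msgSecurityModel }
+//     msgSecurityParameters  OCTET STRING (BER-encoded USM parameters)
+//     msgData                plaintext ScopedPDU, or OCTET STRING holding the
+//                            encrypted ScopedPDU when the privacy flag is set
+//   }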
+
+Message.prototype.processIncomingSecurity = function (user, responseCb) {
+ if (this.hasPrivacy()) {
+ if (!this.decryptPdu(user, responseCb)) {
+ return false;
+ }
+ }
+
+ if (this.hasAuthentication() && !this.isAuthenticationDisabled()) {
+ return this.checkAuthentication(user, responseCb);
+ } else {
+ return true;
+ }
+};
+
+Message.prototype.decryptPdu = function (user, responseCb) {
+ var decryptedPdu;
+ var decryptedPduReader;
+ try {
+ decryptedPdu = Encryption.decryptPdu(this.encryptedPdu, user.privProtocol,
+ this.msgSecurityParameters.msgPrivacyParameters, user.privKey, user.authProtocol,
+ this.msgSecurityParameters.msgAuthoritativeEngineID);
+ decryptedPduReader = new ber.Reader(decryptedPdu);
+ this.pdu = readPdu(decryptedPduReader, true);
+ return true;
+ // very occasionally the decrypt truncates a single byte, causing an
+ // ASN.1 read failure in readPdu(); in that case, disabling auto padding
+ // decrypts the PDU correctly, so this try-catch provides the workaround
+ } catch (possibleTruncationError) {
+ try {
+ decryptedPdu = Encryption.decryptPdu(this.encryptedPdu, user.privProtocol,
+ this.msgSecurityParameters.msgPrivacyParameters, user.privKey, user.authProtocol,
+ this.msgSecurityParameters.msgAuthoritativeEngineID, true);
+ decryptedPduReader = new ber.Reader(decryptedPdu);
+ this.pdu = readPdu(decryptedPduReader, true);
+ return true;
+ } catch (error) {
+ responseCb(new ResponseInvalidError("Failed to decrypt PDU: " + error));
+ return false;
+ }
+ }
+
+};
+
+Message.prototype.checkAuthentication = function (user, responseCb) {
+ if (Authentication.isAuthentic(this.buffer, user.authProtocol, user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID, this.msgSecurityParameters.msgAuthenticationParameters)) {
+ return true;
+ } else {
+ responseCb(new ResponseInvalidError("Authentication digest "
+ + this.msgSecurityParameters.msgAuthenticationParameters.toString('hex')
+ + " received in message does not match digest "
+ + Authentication.calculateDigest(this.buffer, user.authProtocol, user.authKey,
+ this.msgSecurityParameters.msgAuthoritativeEngineID).toString('hex')
+ + " calculated for message"));
+ return false;
+ }
+
+};
+
+Message.prototype.hasAuthentication = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 1;
};
-RequestMessage.prototype.toBuffer = function () {
- if (this.buffer)
- return this.buffer;
+Message.prototype.hasPrivacy = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 2;
+};
+
+Message.prototype.isReportable = function () {
+ return this.msgGlobalData && this.msgGlobalData.msgFlags && this.msgGlobalData.msgFlags & 4;
+};
- var writer = new ber.Writer ();
+Message.prototype.setReportable = function (flag) {
+ if (this.msgGlobalData && this.msgGlobalData.msgFlags) {
+ if (flag) {
+ this.msgGlobalData.msgFlags = this.msgGlobalData.msgFlags | 4;
+ } else {
+ this.msgGlobalData.msgFlags = this.msgGlobalData.msgFlags & (255 - 4);
+ }
+ }
+};
- writer.startSequence ();
+Message.prototype.isAuthenticationDisabled = function () {
+ return this.disableAuthentication;
+};
- writer.writeInt (this.version);
- writer.writeString (this.community);
+Message.prototype.hasAuthoritativeEngineID = function () {
+ return this.msgSecurityParameters && this.msgSecurityParameters.msgAuthoritativeEngineID &&
+ this.msgSecurityParameters.msgAuthoritativeEngineID != "";
+};
- this.pdu.toBuffer (writer);
+Message.prototype.createReportResponseMessage = function (engine, context) {
+ var user = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ var responseSecurityParameters = {
+ msgAuthoritativeEngineID: engine.engineID,
+ msgAuthoritativeEngineBoots: engine.engineBoots,
+ msgAuthoritativeEngineTime: engine.engineTime,
+ msgUserName: user.name,
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ var reportPdu = ReportPdu.createFromVariables(this.pdu.id, [], {});
+ reportPdu.contextName = context;
+ var responseMessage = Message.createRequestV3(user, responseSecurityParameters, reportPdu);
+ responseMessage.msgGlobalData.msgID = this.msgGlobalData.msgID;
+ return responseMessage;
+};
- writer.endSequence ();
+Message.prototype.createResponseForRequest = function (responsePdu) {
+ if (this.version == Version3) {
+ return this.createV3ResponseFromRequest(responsePdu);
+ } else {
+ return this.createCommunityResponseFromRequest(responsePdu);
+ }
+};
- this.buffer = writer.buffer;
+Message.prototype.createCommunityResponseFromRequest = function (responsePdu) {
+ return Message.createCommunity(this.version, this.community, responsePdu);
+};
- return this.buffer;
+Message.prototype.createV3ResponseFromRequest = function (responsePdu) {
+ var responseUser = {
+ name: this.user.name,
+ level: this.user.level,
+ authProtocol: this.user.authProtocol,
+ authKey: this.user.authKey,
+ privProtocol: this.user.privProtocol,
+ privKey: this.user.privKey
+ };
+ var responseSecurityParameters = {
+ msgAuthoritativeEngineID: this.msgSecurityParameters.msgAuthoritativeEngineID,
+ msgAuthoritativeEngineBoots: this.msgSecurityParameters.msgAuthoritativeEngineBoots,
+ msgAuthoritativeEngineTime: this.msgSecurityParameters.msgAuthoritativeEngineTime,
+ msgUserName: this.msgSecurityParameters.msgUserName,
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ var responseGlobalData = {
+ msgID: this.msgGlobalData.msgID,
+ msgMaxSize: 65507,
+ msgFlags: this.msgGlobalData.msgFlags & (255 - 4),
+ msgSecurityModel: 3
+ };
+ return Message.createV3(responseUser, responseGlobalData, responseSecurityParameters, responsePdu);
};
-var ResponseMessage = function (buffer) {
- var reader = new ber.Reader (buffer);
+Message.createCommunity = function (version, community, pdu) {
+ var message = new Message();
+
+ message.version = version;
+ message.community = community;
+ message.pdu = pdu;
+
+ return message;
+};
- reader.readSequence ();
+Message.createRequestV3 = function (user, msgSecurityParameters, pdu) {
+ var authFlag = user.level == SecurityLevel.authNoPriv || user.level == SecurityLevel.authPriv ? 1 : 0;
+ var privFlag = user.level == SecurityLevel.authPriv ? 1 : 0;
+ var reportableFlag = (pdu.type == PduType.GetResponse || pdu.type == PduType.TrapV2) ? 0 : 1;
+ var msgGlobalData = {
+ msgID: _generateId(), // random ID
+ msgMaxSize: 65507,
+ msgFlags: reportableFlag * 4 | privFlag * 2 | authFlag * 1,
+ msgSecurityModel: 3
+ };
+ return Message.createV3(user, msgGlobalData, msgSecurityParameters, pdu);
+};
- this.version = reader.readInt ();
- this.community = reader.readString ();
+Message.createV3 = function (user, msgGlobalData, msgSecurityParameters, pdu) {
+ var message = new Message();
+
+ message.version = 3;
+ message.user = user;
+ message.msgGlobalData = msgGlobalData;
+ message.msgSecurityParameters = {
+ msgAuthoritativeEngineID: msgSecurityParameters.msgAuthoritativeEngineID || Buffer.from(""),
+ msgAuthoritativeEngineBoots: msgSecurityParameters.msgAuthoritativeEngineBoots || 0,
+ msgAuthoritativeEngineTime: msgSecurityParameters.msgAuthoritativeEngineTime || 0,
+ msgUserName: user.name || "",
+ msgAuthenticationParameters: "",
+ msgPrivacyParameters: ""
+ };
+ message.pdu = pdu;
+
+ return message;
+};
- var type = reader.peek ();
+Message.createDiscoveryV3 = function (pdu) {
+ var msgSecurityParameters = {
+ msgAuthoritativeEngineID: Buffer.from(""),
+ msgAuthoritativeEngineBoots: 0,
+ msgAuthoritativeEngineTime: 0
+ };
+ var emptyUser = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ return Message.createRequestV3(emptyUser, msgSecurityParameters, pdu);
+};
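+
+// Discovery sketch: a noAuthNoPriv message with an empty user and a
+// zero-length engine ID elicits a Report PDU carrying the agent's engine ID,
+// boots and time, which onMsg() stores before resending the original request:
+//
+//   var discoveryPdu = createDiscoveryPdu(context);  // defined earlier in this file
+//   var discoveryMessage = Message.createDiscoveryV3(discoveryPdu);
+//   // send discoveryMessage, then read msgAuthoritativeEngineID, -Boots and
+//   // -Time from the Report response's msgSecurityParameters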
- if (type == PduType.GetResponse) {
- this.pdu = new GetResponsePdu (reader);
- } else {
- throw new ResponseInvalidError ("Unknown PDU type '" + type
- + "' in response");
- }
+Message.createFromBuffer = function (buffer, user) {
+ var reader = new ber.Reader(buffer);
+ var message = new Message();
+
+ reader.readSequence();
+
+ message.version = reader.readInt();
+
+ if (message.version != 3) {
+ message.community = reader.readString();
+ message.pdu = readPdu(reader, false);
+ } else {
+ // HeaderData
+ message.msgGlobalData = {};
+ reader.readSequence();
+ message.msgGlobalData.msgID = reader.readInt();
+ message.msgGlobalData.msgMaxSize = reader.readInt();
+ message.msgGlobalData.msgFlags = reader.readString(ber.OctetString, true)[0];
+ message.msgGlobalData.msgSecurityModel = reader.readInt();
+
+ // msgSecurityParameters
+ message.msgSecurityParameters = {};
+ var msgSecurityParametersReader = new ber.Reader(reader.readString(ber.OctetString, true));
+ msgSecurityParametersReader.readSequence();
+ message.msgSecurityParameters.msgAuthoritativeEngineID = msgSecurityParametersReader.readString(ber.OctetString, true);
+ message.msgSecurityParameters.msgAuthoritativeEngineBoots = msgSecurityParametersReader.readInt();
+ message.msgSecurityParameters.msgAuthoritativeEngineTime = msgSecurityParametersReader.readInt();
+ message.msgSecurityParameters.msgUserName = msgSecurityParametersReader.readString();
+ message.msgSecurityParameters.msgAuthenticationParameters = Buffer.from(msgSecurityParametersReader.readString(ber.OctetString, true));
+ message.msgSecurityParameters.msgPrivacyParameters = Buffer.from(msgSecurityParametersReader.readString(ber.OctetString, true));
+
+ if (message.hasPrivacy()) {
+ message.encryptedPdu = reader.readString(ber.OctetString, true);
+ message.pdu = null;
+ } else {
+ message.pdu = readPdu(reader, true);
+ }
+ }
+
+ message.buffer = buffer;
+
+ return message;
};
+
+var Req = function (session, message, feedCb, responseCb, options) {
+
+ this.message = message;
+ this.responseCb = responseCb;
+ this.retries = session.retries;
+ this.timeout = session.timeout;
+ this.onResponse = session.onSimpleGetResponse;
+ this.feedCb = feedCb;
+ this.port = (options && options.port) ? options.port : session.port;
+ this.options = options; // retained for the v3 Report retry in onMsg()
+ this.context = session.context;
+};
+
+Req.prototype.getId = function () {
+ return this.message.getReqId();
+};
+
+
/*****************************************************************************
** Session class definition
**/
-var Session = function (target, community, options) {
- this.target = target || "127.0.0.1";
- this.community = community || "public";
-
- this.version = (options && options.version)
- ? options.version
- : Version1;
-
- this.transport = (options && options.transport)
- ? options.transport
- : "udp4";
- this.port = (options && options.port )
- ? options.port
- : 161;
- this.trapPort = (options && options.trapPort )
- ? options.trapPort
- : 162;
-
- this.retries = (options && (options.retries || options.retries == 0))
- ? options.retries
- : 1;
- this.timeout = (options && options.timeout)
- ? options.timeout
- : 5000;
-
- this.sourceAddress = (options && options.sourceAddress )
- ? options.sourceAddress
- : undefined;
- this.sourcePort = (options && options.sourcePort )
- ? parseInt(options.sourcePort)
- : undefined;
-
- this.idBitsSize = (options && options.idBitsSize)
- ? parseInt(options.idBitsSize)
- : 32;
-
- this.reqs = {};
- this.reqCount = 0;
-
- this.dgram = dgram.createSocket (this.transport);
- this.dgram.unref();
-
- var me = this;
- this.dgram.on ("message", me.onMsg.bind (me));
- this.dgram.on ("close", me.onClose.bind (me));
- this.dgram.on ("error", me.onError.bind (me));
-
- if (this.sourceAddress || this.sourcePort)
- this.dgram.bind (this.sourcePort, this.sourceAddress);
-};
-
-util.inherits (Session, events.EventEmitter);
+var Session = function (target, authenticator, options) {
+ this.target = target || "127.0.0.1";
+
+ this.version = (options && options.version)
+ ? options.version
+ : Version1;
+
+ if (this.version == Version3) {
+ this.user = authenticator;
+ } else {
+ this.community = authenticator || "public";
+ }
+
+ this.transport = (options && options.transport)
+ ? options.transport
+ : "udp4";
+ this.port = (options && options.port)
+ ? options.port
+ : 161;
+ this.trapPort = (options && options.trapPort)
+ ? options.trapPort
+ : 162;
+
+ this.retries = (options && (options.retries || options.retries == 0))
+ ? options.retries
+ : 1;
+ this.timeout = (options && options.timeout)
+ ? options.timeout
+ : 5000;
+
+ this.sourceAddress = (options && options.sourceAddress)
+ ? options.sourceAddress
+ : undefined;
+ this.sourcePort = (options && options.sourcePort)
+ ? parseInt(options.sourcePort)
+ : undefined;
+
+ this.idBitsSize = (options && options.idBitsSize)
+ ? parseInt(options.idBitsSize)
+ : 32;
+
+ this.context = (options && options.context) ? options.context : "";
+
+ DEBUG = options && options.debug;
+
+ this.reqs = {};
+ this.reqCount = 0;
+
+ this.dgram = dgram.createSocket(this.transport);
+ this.dgram.unref();
+
+ var me = this;
+ this.dgram.on("message", me.onMsg.bind(me));
+ this.dgram.on("close", me.onClose.bind(me));
+ this.dgram.on("error", me.onError.bind(me));
+
+ if (this.sourceAddress || this.sourcePort)
+ this.dgram.bind(this.sourcePort, this.sourceAddress);
+};
+
+util.inherits(Session, events.EventEmitter);
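+
+// Construction sketch: the second argument is a community string for
+// v1/v2c and a user object for v3 (the protocol constants are assumed to be
+// defined elsewhere in this file):
+//
+//   var v2session = new Session("192.0.2.1", "public", { version: Version2c });
+//   var v3session = new Session("192.0.2.1", {
+//       name: "operator",
+//       level: SecurityLevel.authPriv,
+//       authProtocol: authProtocol,   // e.g. an MD5/SHA constant
+//       authKey: "authpassword",
+//       privProtocol: privProtocol,   // e.g. the DES constant
+//       privKey: "privpassword"
+//   }, { version: Version3 });
+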
Session.prototype.close = function () {
- this.dgram.close ();
- return this;
+ this.dgram.close();
+ return this;
};
Session.prototype.cancelRequests = function (error) {
- var id;
- for (id in this.reqs) {
- var req = this.reqs[id];
- this.unregisterRequest (req.id);
- req.responseCb (error);
- }
-};
-
-function _generateId (bitSize) {
- if (bitSize === 16) {
- return Math.floor(Math.random() * 10000) % 65535;
- }
- return Math.floor(Math.random() * 100000000) % 4294967295;
+ var id;
+ for (id in this.reqs) {
+ var req = this.reqs[id];
+ this.unregisterRequest(req.getId());
+ req.responseCb(error);
+ }
+};
+
+function _generateId(bitSize) {
+ if (bitSize === 16) {
+ return Math.floor(Math.random() * 10000) % 65535;
+ }
+ return Math.floor(Math.random() * 100000000) % 4294967295;
}
Session.prototype.get = function (oids, responseCb) {
- function feedCb (req, message) {
- var pdu = message.pdu;
- var varbinds = [];
-
- if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
- req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
- + "match response OIDs"));
- } else {
- for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
- if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[i].oid
- + "' in request at positiion '" + i + "' does not "
- + "match OID '" + pdu.varbinds[i].oid + "' in response "
- + "at position '" + i + "'"));
- return;
- } else {
- varbinds.push (pdu.varbinds[i]);
- }
- }
-
- req.responseCb (null, varbinds);
- }
- }
-
- var pduVarbinds = [];
-
- for (var i = 0; i < oids.length; i++) {
- var varbind = {
- oid: oids[i]
- };
- pduVarbinds.push (varbind);
- }
-
- this.simpleGet (GetRequestPdu, feedCb, pduVarbinds, responseCb);
-
- return this;
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in request at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(GetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
};
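+
+// Usage sketch for get(), given a session as in the construction sketch
+// above (OIDs are examples; error handling abbreviated):
+//
+//   session.get(["1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.1.5.0"], function (error, varbinds) {
+//       if (error) {
+//           console.error(error);
+//       } else {
+//           for (var i = 0; i < varbinds.length; i++)
+//               console.log(varbinds[i].oid + " = " + varbinds[i].value);
+//       }
+//   });
+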
Session.prototype.getBulk = function () {
- var oids, nonRepeaters, maxRepetitions, responseCb;
-
- if (arguments.length >= 4) {
- oids = arguments[0];
- nonRepeaters = arguments[1];
- maxRepetitions = arguments[2];
- responseCb = arguments[3];
- } else if (arguments.length >= 3) {
- oids = arguments[0];
- nonRepeaters = arguments[1];
- maxRepetitions = 10;
- responseCb = arguments[2];
- } else {
- oids = arguments[0];
- nonRepeaters = 0;
- maxRepetitions = 10;
- responseCb = arguments[1];
- }
-
- function feedCb (req, message) {
- var pdu = message.pdu;
- var varbinds = [];
- var i = 0;
-
- // first walk through and grab non-repeaters
- if (pdu.varbinds.length < nonRepeaters) {
- req.responseCb (new ResponseInvalidError ("Varbind count in "
- + "response '" + pdu.varbinds.length + "' is less than "
- + "non-repeaters '" + nonRepeaters + "' in request"));
- } else {
- for ( ; i < nonRepeaters; i++) {
- if (isVarbindError (pdu.varbinds[i])) {
- varbinds.push (pdu.varbinds[i]);
- } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid,
- pdu.varbinds[i].oid)) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[i].oid + "' in request at "
- + "positiion '" + i + "' does not precede "
- + "OID '" + pdu.varbinds[i].oid + "' in response "
- + "at position '" + i + "'"));
- return;
- } else {
- varbinds.push (pdu.varbinds[i]);
- }
- }
- }
-
- var repeaters = req.message.pdu.varbinds.length - nonRepeaters;
-
- // secondly walk through and grab repeaters
- if (pdu.varbinds.length % (repeaters)) {
- req.responseCb (new ResponseInvalidError ("Varbind count in "
- + "response '" + pdu.varbinds.length + "' is not a "
- + "multiple of repeaters '" + repeaters
- + "' plus non-repeaters '" + nonRepeaters + "' in request"));
- } else {
- while (i < pdu.varbinds.length) {
- for (var j = 0; j < repeaters; j++, i++) {
- var reqIndex = nonRepeaters + j;
- var respIndex = i;
-
- if (isVarbindError (pdu.varbinds[respIndex])) {
- if (! varbinds[reqIndex])
- varbinds[reqIndex] = [];
- varbinds[reqIndex].push (pdu.varbinds[respIndex]);
- } else if (! oidFollowsOid (
- req.message.pdu.varbinds[reqIndex].oid,
- pdu.varbinds[respIndex].oid)) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[reqIndex].oid
- + "' in request at positiion '" + (reqIndex)
- + "' does not precede OID '"
- + pdu.varbinds[respIndex].oid
- + "' in response at position '" + (respIndex) + "'"));
- return;
- } else {
- if (! varbinds[reqIndex])
- varbinds[reqIndex] = [];
- varbinds[reqIndex].push (pdu.varbinds[respIndex]);
- }
- }
- }
- }
-
- req.responseCb (null, varbinds);
- }
-
- var pduVarbinds = [];
-
- for (var i = 0; i < oids.length; i++) {
- var varbind = {
- oid: oids[i]
- };
- pduVarbinds.push (varbind);
- }
-
- var options = {
- nonRepeaters: nonRepeaters,
- maxRepetitions: maxRepetitions
- };
-
- this.simpleGet (GetBulkRequestPdu, feedCb, pduVarbinds, responseCb,
- options);
-
- return this;
+ var oids, nonRepeaters, maxRepetitions, responseCb;
+
+ if (arguments.length >= 4) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ oids = arguments[0];
+ nonRepeaters = arguments[1];
+ maxRepetitions = 10;
+ responseCb = arguments[2];
+ } else {
+ oids = arguments[0];
+ nonRepeaters = 0;
+ maxRepetitions = 10;
+ responseCb = arguments[1];
+ }
+
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+ var i = 0;
+
+ // first, walk through and grab non-repeaters
+ if (pdu.varbinds.length < nonRepeaters) {
+ req.responseCb(new ResponseInvalidError("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is less than "
+ + "non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ for (; i < nonRepeaters; i++) {
+ if (isVarbindError(pdu.varbinds[i])) {
+ varbinds.push(pdu.varbinds[i]);
+ } else if (!oidFollowsOid(req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+ + "positiion '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+ }
+
+ var repeaters = req.message.pdu.varbinds.length - nonRepeaters;
+
+ // second, walk through and grab repeaters
+ if (pdu.varbinds.length % (repeaters)) {
+ req.responseCb(new ResponseInvalidError("Varbind count in "
+ + "response '" + pdu.varbinds.length + "' is not a "
+ + "multiple of repeaters '" + repeaters
+ + "' plus non-repeaters '" + nonRepeaters + "' in request"));
+ } else {
+ while (i < pdu.varbinds.length) {
+ for (var j = 0; j < repeaters; j++, i++) {
+ var reqIndex = nonRepeaters + j;
+ var respIndex = i;
+
+ if (isVarbindError(pdu.varbinds[respIndex])) {
+ if (!varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push(pdu.varbinds[respIndex]);
+ } else if (!oidFollowsOid(
+ req.message.pdu.varbinds[reqIndex].oid,
+ pdu.varbinds[respIndex].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[reqIndex].oid
+ + "' in request at positiion '" + (reqIndex)
+ + "' does not precede OID '"
+ + pdu.varbinds[respIndex].oid
+ + "' in response at position '" + (respIndex) + "'"));
+ return;
+ } else {
+ if (!varbinds[reqIndex])
+ varbinds[reqIndex] = [];
+ varbinds[reqIndex].push(pdu.varbinds[respIndex]);
+ }
+ }
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ var options = {
+ nonRepeaters: nonRepeaters,
+ maxRepetitions: maxRepetitions
+ };
+
+ this.simpleGet(GetBulkRequestPdu, feedCb, pduVarbinds, responseCb,
+ options);
+
+ return this;
};
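+
+// Usage sketch for getBulk(): the first nonRepeaters OIDs return one varbind
+// each; every remaining OID is repeated up to maxRepetitions times, so its
+// results arrive as an array:
+//
+//   session.getBulk(["1.3.6.1.2.1.1.3.0",    // non-repeater (sysUpTime)
+//       "1.3.6.1.2.1.2.2.1.2"],              // repeater (ifDescr column)
+//       1, 10, function (error, varbinds) {
+//       // varbinds[0] is a single varbind; varbinds[1] is an array of up to 10
+//   });
+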
Session.prototype.getNext = function (oids, responseCb) {
- function feedCb (req, message) {
- var pdu = message.pdu;
- var varbinds = [];
-
- if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
- req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
- + "match response OIDs"));
- } else {
- for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
- if (isVarbindError (pdu.varbinds[i])) {
- varbinds.push (pdu.varbinds[i]);
- } else if (! oidFollowsOid (req.message.pdu.varbinds[i].oid,
- pdu.varbinds[i].oid)) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[i].oid + "' in request at "
- + "positiion '" + i + "' does not precede "
- + "OID '" + pdu.varbinds[i].oid + "' in response "
- + "at position '" + i + "'"));
- return;
- } else {
- varbinds.push (pdu.varbinds[i]);
- }
- }
-
- req.responseCb (null, varbinds);
- }
- }
-
- var pduVarbinds = [];
-
- for (var i = 0; i < oids.length; i++) {
- var varbind = {
- oid: oids[i]
- };
- pduVarbinds.push (varbind);
- }
-
- this.simpleGet (GetNextRequestPdu, feedCb, pduVarbinds, responseCb);
-
- return this;
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (isVarbindError(pdu.varbinds[i])) {
+ varbinds.push(pdu.varbinds[i]);
+ } else if (!oidFollowsOid(req.message.pdu.varbinds[i].oid,
+ pdu.varbinds[i].oid)) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid + "' in request at "
+ + "positiion '" + i + "' does not precede "
+ + "OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < oids.length; i++) {
+ var varbind = {
+ oid: oids[i]
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(GetNextRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
};
Session.prototype.inform = function () {
- var typeOrOid = arguments[0];
- var varbinds, options = {}, responseCb;
-
- /**
- ** Support the following signatures:
- **
- ** typeOrOid, varbinds, options, callback
- ** typeOrOid, varbinds, callback
- ** typeOrOid, options, callback
- ** typeOrOid, callback
- **/
- if (arguments.length >= 4) {
- varbinds = arguments[1];
- options = arguments[2];
- responseCb = arguments[3];
- } else if (arguments.length >= 3) {
- if (arguments[1].constructor != Array) {
- varbinds = [];
- options = arguments[1];
- responseCb = arguments[2];
- } else {
- varbinds = arguments[1];
- responseCb = arguments[2];
- }
- } else {
- varbinds = [];
- responseCb = arguments[1];
- }
-
- function feedCb (req, message) {
- var pdu = message.pdu;
- var varbinds = [];
-
- if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
- req.responseCb (new ResponseInvalidError ("Inform OIDs do not "
- + "match response OIDs"));
- } else {
- for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
- if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[i].oid
- + "' in inform at positiion '" + i + "' does not "
- + "match OID '" + pdu.varbinds[i].oid + "' in response "
- + "at position '" + i + "'"));
- return;
- } else {
- varbinds.push (pdu.varbinds[i]);
- }
- }
-
- req.responseCb (null, varbinds);
- }
- }
-
- if (typeof typeOrOid != "string")
- typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
-
- var pduVarbinds = [
- {
- oid: "1.3.6.1.2.1.1.3.0",
- type: ObjectType.TimeTicks,
- value: options.upTime || Math.floor (process.uptime () * 100)
- },
- {
- oid: "1.3.6.1.6.3.1.1.4.1.0",
- type: ObjectType.OID,
- value: typeOrOid
- }
- ];
-
- for (var i = 0; i < varbinds.length; i++) {
- var varbind = {
- oid: varbinds[i].oid,
- type: varbinds[i].type,
- value: varbinds[i].value
- };
- pduVarbinds.push (varbind);
- }
-
- options.port = this.trapPort;
-
- this.simpleGet (InformRequestPdu, feedCb, pduVarbinds, responseCb, options);
-
- return this;
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ options = arguments[2];
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ responseCb = arguments[2];
+ } else {
+ varbinds = arguments[1];
+ responseCb = arguments[2];
+ }
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ if (this.version == Version1) {
+ responseCb(new RequestInvalidError("Inform not allowed for SNMPv1"));
+ return;
+ }
+
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Inform OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in inform at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ var pduVarbinds = [
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor(process.uptime() * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ ];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ options.port = this.trapPort;
+
+ this.simpleGet(InformRequestPdu, feedCb, pduVarbinds, responseCb, options);
+
+ return this;
};
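+
+// Usage sketch for inform() (rejected for SNMPv1, as enforced above); the
+// sysUpTime and snmpTrapOID varbinds are prepended automatically:
+//
+//   session.inform("1.3.6.1.6.3.1.1.5.2", [], function (error, varbinds) {
+//       if (error)
+//           console.error(error);  // timeout, or response OIDs did not match
+//   });
+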
Session.prototype.onClose = function () {
- this.cancelRequests (new Error ("Socket forcibly closed"));
- this.emit ("close");
+ this.cancelRequests(new Error("Socket forcibly closed"));
+ this.emit("close");
};
Session.prototype.onError = function (error) {
- this.emit (error);
-};
-
-Session.prototype.onMsg = function (buffer, remote) {
- try {
- var message = new ResponseMessage (buffer);
-
- var req = this.unregisterRequest (message.pdu.id);
- if (! req)
- return;
-
- try {
- if (message.version != req.message.version) {
- req.responseCb (new ResponseInvalidError ("Version in request '"
- + req.message.version + "' does not match version in "
- + "response '" + message.version));
- } else if (message.community != req.message.community) {
- req.responseCb (new ResponseInvalidError ("Community '"
- + req.message.community + "' in request does not match "
- + "community '" + message.community + "' in response"));
- } else if (message.pdu.type == PduType.GetResponse) {
- req.onResponse (req, message);
- } else {
- req.responseCb (new ResponseInvalidError ("Unknown PDU type '"
- + message.pdu.type + "' in response"));
- }
- } catch (error) {
- req.responseCb (error);
- }
- } catch (error) {
- this.emit("error", error);
- }
+ this.emit("error", error);
+};
+
+Session.prototype.onMsg = function (buffer) {
+ try {
+ var message = Message.createFromBuffer(buffer);
+
+ var req = this.unregisterRequest(message.getReqId());
+ if (!req)
+ return;
+
+ if (!message.processIncomingSecurity(this.user, req.responseCb))
+ return;
+
+ try {
+ if (message.version != req.message.version) {
+ req.responseCb(new ResponseInvalidError("Version in request '"
+ + req.message.version + "' does not match version in "
+ + "response '" + message.version + "'"));
+ } else if (message.community != req.message.community) {
+ req.responseCb(new ResponseInvalidError("Community '"
+ + req.message.community + "' in request does not match "
+ + "community '" + message.community + "' in response"));
+ } else if (message.pdu.type == PduType.GetResponse) {
+ req.onResponse(req, message);
+ } else if (message.pdu.type == PduType.Report) {
+ if (!req.originalPdu) {
+ req.responseCb(new ResponseInvalidError("Unexpected Report PDU"));
+ return;
+ }
+ this.msgSecurityParameters = {
+ msgAuthoritativeEngineID: message.msgSecurityParameters.msgAuthoritativeEngineID,
+ msgAuthoritativeEngineBoots: message.msgSecurityParameters.msgAuthoritativeEngineBoots,
+ msgAuthoritativeEngineTime: message.msgSecurityParameters.msgAuthoritativeEngineTime
+ };
+ req.originalPdu.contextName = this.context;
+ this.sendV3Req(req.originalPdu, req.feedCb, req.responseCb, req.options, req.port);
+ } else {
+ req.responseCb(new ResponseInvalidError("Unknown PDU type '"
+ + message.pdu.type + "' in response"));
+ }
+ } catch (error) {
+ req.responseCb(error);
+ }
+ } catch (error) {
+ this.emit("error", error);
+ }
};
Session.prototype.onSimpleGetResponse = function (req, message) {
- var pdu = message.pdu;
-
- if (pdu.errorStatus > 0) {
- var statusString = ErrorStatus[pdu.errorStatus]
- || ErrorStatus.GeneralError;
- var statusCode = ErrorStatus[statusString]
- || ErrorStatus[ErrorStatus.GeneralError];
-
- if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) {
- req.responseCb (new RequestFailedError (statusString, statusCode));
- } else {
- var oid = pdu.varbinds[pdu.errorIndex - 1].oid;
- var error = new RequestFailedError (statusString + ": " + oid,
- statusCode);
- req.responseCb (error);
- }
- } else {
- req.feedCb (req, message);
- }
+ var pdu = message.pdu;
+
+ if (pdu.errorStatus > 0) {
+ var statusString = ErrorStatus[pdu.errorStatus]
+ || ErrorStatus.GeneralError;
+ var statusCode = ErrorStatus[statusString]
+ || ErrorStatus[ErrorStatus.GeneralError];
+
+ if (pdu.errorIndex <= 0 || pdu.errorIndex > pdu.varbinds.length) {
+ req.responseCb(new RequestFailedError(statusString, statusCode));
+ } else {
+ var oid = pdu.varbinds[pdu.errorIndex - 1].oid;
+ var error = new RequestFailedError(statusString + ": " + oid,
+ statusCode);
+ req.responseCb(error);
+ }
+ } else {
+ req.feedCb(req, message);
+ }
};
Session.prototype.registerRequest = function (req) {
- if (! this.reqs[req.id]) {
- this.reqs[req.id] = req;
- if (this.reqCount <= 0)
- this.dgram.ref();
- this.reqCount++;
- }
- var me = this;
- req.timer = setTimeout (function () {
- if (req.retries-- > 0) {
- me.send (req);
- } else {
- me.unregisterRequest (req.id);
- req.responseCb (new RequestTimedOutError (
- "Request timed out"));
- }
- }, req.timeout);
+ if (!this.reqs[req.getId()]) {
+ this.reqs[req.getId()] = req;
+ if (this.reqCount <= 0)
+ this.dgram.ref();
+ this.reqCount++;
+ }
+ var me = this;
+ req.timer = setTimeout(function () {
+ if (req.retries-- > 0) {
+ me.send(req);
+ } else {
+ me.unregisterRequest(req.getId());
+ req.responseCb(new RequestTimedOutError(
+ "Request timed out"));
+ }
+ }, req.timeout);
};
Session.prototype.send = function (req, noWait) {
- try {
- var me = this;
-
- var buffer = req.message.toBuffer ();
-
- this.dgram.send (buffer, 0, buffer.length, req.port, this.target,
- function (error, bytes) {
- if (error) {
- req.responseCb (error);
- } else {
- if (noWait) {
- req.responseCb (null);
- } else {
- me.registerRequest (req);
- }
- }
- });
- } catch (error) {
- req.responseCb (error);
- }
-
- return this;
+ try {
+ var me = this;
+
+ var buffer = req.message.toBuffer();
+
+ this.dgram.send(buffer, 0, buffer.length, req.port, this.target,
+ function (error, bytes) {
+ if (error) {
+ req.responseCb(error);
+ } else {
+ if (noWait) {
+ req.responseCb(null);
+ } else {
+ me.registerRequest(req);
+ }
+ }
+ });
+ } catch (error) {
+ req.responseCb(error);
+ }
+
+ return this;
};
Session.prototype.set = function (varbinds, responseCb) {
- function feedCb (req, message) {
- var pdu = message.pdu;
- var varbinds = [];
-
- if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
- req.responseCb (new ResponseInvalidError ("Requested OIDs do not "
- + "match response OIDs"));
- } else {
- for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
- if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
- req.responseCb (new ResponseInvalidError ("OID '"
- + req.message.pdu.varbinds[i].oid
- + "' in request at positiion '" + i + "' does not "
- + "match OID '" + pdu.varbinds[i].oid + "' in response "
- + "at position '" + i + "'"));
- return;
- } else {
- varbinds.push (pdu.varbinds[i]);
- }
- }
-
- req.responseCb (null, varbinds);
- }
- }
-
- var pduVarbinds = [];
-
- for (var i = 0; i < varbinds.length; i++) {
- var varbind = {
- oid: varbinds[i].oid,
- type: varbinds[i].type,
- value: varbinds[i].value
- };
- pduVarbinds.push (varbind);
- }
-
- this.simpleGet (SetRequestPdu, feedCb, pduVarbinds, responseCb);
-
- return this;
+ function feedCb(req, message) {
+ var pdu = message.pdu;
+ var varbinds = [];
+
+ if (req.message.pdu.varbinds.length != pdu.varbinds.length) {
+ req.responseCb(new ResponseInvalidError("Requested OIDs do not "
+ + "match response OIDs"));
+ } else {
+ for (var i = 0; i < req.message.pdu.varbinds.length; i++) {
+ if (req.message.pdu.varbinds[i].oid != pdu.varbinds[i].oid) {
+ req.responseCb(new ResponseInvalidError("OID '"
+ + req.message.pdu.varbinds[i].oid
+ + "' in request at positiion '" + i + "' does not "
+ + "match OID '" + pdu.varbinds[i].oid + "' in response "
+ + "at position '" + i + "'"));
+ return;
+ } else {
+ varbinds.push(pdu.varbinds[i]);
+ }
+ }
+
+ req.responseCb(null, varbinds);
+ }
+ }
+
+ var pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ this.simpleGet(SetRequestPdu, feedCb, pduVarbinds, responseCb);
+
+ return this;
};
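+
+// Usage sketch for set(); the type constants come from ObjectType, defined
+// earlier in this file:
+//
+//   session.set([{
+//       oid: "1.3.6.1.2.1.1.5.0",
+//       type: ObjectType.OctetString,
+//       value: "host1"
+//   }], function (error, varbinds) {
+//       if (error)
+//           console.error(error);
+//   });
+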
Session.prototype.simpleGet = function (pduClass, feedCb, varbinds,
- responseCb, options) {
- var req = {};
-
- try {
- var id = _generateId (this.idBitsSize);
- var pdu = new pduClass (id, varbinds, options);
- var message = new RequestMessage (this.version, this.community, pdu);
-
- req = {
- id: id,
- message: message,
- responseCb: responseCb,
- retries: this.retries,
- timeout: this.timeout,
- onResponse: this.onSimpleGetResponse,
- feedCb: feedCb,
- port: (options && options.port) ? options.port : this.port
- };
-
- this.send (req);
- } catch (error) {
- if (req.responseCb)
- req.responseCb (error);
- }
-};
-
-function subtreeCb (req, varbinds) {
- var done = 0;
-
- for (var i = varbinds.length; i > 0; i--) {
- if (! oidInSubtree (req.baseOid, varbinds[i - 1].oid)) {
- done = 1;
- varbinds.pop ();
- }
- }
-
- if (varbinds.length > 0)
- req.feedCb (varbinds);
-
- if (done)
- return true;
+ responseCb, options) {
+ try {
+ var id = _generateId(this.idBitsSize);
+ var pdu = SimplePdu.createFromVariables(pduClass, id, varbinds, options);
+ var message;
+ var req;
+
+ if (this.version == Version3) {
+ if (this.msgSecurityParameters) {
+ this.sendV3Req(pdu, feedCb, responseCb, options, this.port);
+ } else {
+ // SNMPv3 discovery
+ var discoveryPdu = createDiscoveryPdu(this.context);
+ var discoveryMessage = Message.createDiscoveryV3(discoveryPdu);
+ var discoveryReq = new Req(this, discoveryMessage, feedCb, responseCb, options);
+ discoveryReq.originalPdu = pdu;
+ this.send(discoveryReq);
+ }
+ } else {
+ message = Message.createCommunity(this.version, this.community, pdu);
+ req = new Req(this, message, feedCb, responseCb, options);
+ this.send(req);
+ }
+ } catch (error) {
+ if (responseCb)
+ responseCb(error);
+ }
}
-Session.prototype.subtree = function () {
- var me = this;
- var oid = arguments[0];
- var maxRepetitions, feedCb, doneCb;
-
- if (arguments.length < 4) {
- maxRepetitions = 20;
- feedCb = arguments[1];
- doneCb = arguments[2];
- } else {
- maxRepetitions = arguments[1];
- feedCb = arguments[2];
- doneCb = arguments[3];
- }
-
- var req = {
- feedCb: feedCb,
- doneCb: doneCb,
- maxRepetitions: maxRepetitions,
- baseOid: oid
- };
-
- this.walk (oid, maxRepetitions, subtreeCb.bind (me, req), doneCb);
-
- return this;
-};
-
-function tableColumnsResponseCb (req, error) {
- if (error) {
- req.responseCb (error);
- } else if (req.error) {
- req.responseCb (req.error);
- } else {
- if (req.columns.length > 0) {
- var column = req.columns.pop ();
- var me = this;
- this.subtree (req.rowOid + column, req.maxRepetitions,
- tableColumnsFeedCb.bind (me, req),
- tableColumnsResponseCb.bind (me, req));
- } else {
- req.responseCb (null, req.table);
- }
- }
+function subtreeCb(req, varbinds) {
+ var done = 0;
+
+ for (var i = varbinds.length; i > 0; i--) {
+ if (!oidInSubtree(req.baseOid, varbinds[i - 1].oid)) {
+ done = 1;
+ varbinds.pop();
+ }
+ }
+
+ if (varbinds.length > 0)
+ req.feedCb(varbinds);
+
+ if (done)
+ return true;
+}
+
+Session.prototype.subtree = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ feedCb: feedCb,
+ doneCb: doneCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid
+ };
+
+ this.walk(oid, maxRepetitions, subtreeCb.bind(me, req), doneCb);
+
+ return this;
+};
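+
+// Usage sketch for subtree(): feedCb receives batches of varbinds under the
+// base OID, and the walk ends automatically once results leave the subtree:
+//
+//   session.subtree("1.3.6.1.2.1.1", 20, function (varbinds) {
+//       varbinds.forEach(function (vb) {
+//           console.log(vb.oid + " = " + vb.value);
+//       });
+//   }, function (error) {
+//       if (error)
+//           console.error(error);
+//   });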
+
+function tableColumnsResponseCb(req, error) {
+ if (error) {
+ req.responseCb(error);
+ } else if (req.error) {
+ req.responseCb(req.error);
+ } else {
+ if (req.columns.length > 0) {
+ var column = req.columns.pop();
+ var me = this;
+ this.subtree(req.rowOid + column, req.maxRepetitions,
+ tableColumnsFeedCb.bind(me, req),
+ tableColumnsResponseCb.bind(me, req));
+ } else {
+ req.responseCb(null, req.table);
+ }
+ }
}
-function tableColumnsFeedCb (req, varbinds) {
- for (var i = 0; i < varbinds.length; i++) {
- if (isVarbindError (varbinds[i])) {
- req.error = new RequestFailedError (varbindError (varbind[i]));
- return true;
- }
-
- var oid = varbinds[i].oid.replace (req.rowOid, "");
- if (oid && oid != varbinds[i].oid) {
- var match = oid.match (/^(\d+)\.(.+)$/);
- if (match && match[1] > 0) {
- if (! req.table[match[2]])
- req.table[match[2]] = {};
- req.table[match[2]][match[1]] = varbinds[i].value;
- }
- }
- }
+function tableColumnsFeedCb(req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError(varbinds[i])) {
+ req.error = new RequestFailedError(varbindError(varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace(req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match(/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (!req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
}
Session.prototype.tableColumns = function () {
- var me = this;
-
- var oid = arguments[0];
- var columns = arguments[1];
- var maxRepetitions, responseCb;
-
- if (arguments.length < 4) {
- responseCb = arguments[2];
- maxRepetitions = 20;
- } else {
- maxRepetitions = arguments[2];
- responseCb = arguments[3];
- }
-
- var req = {
- responseCb: responseCb,
- maxRepetitions: maxRepetitions,
- baseOid: oid,
- rowOid: oid + ".1.",
- columns: columns.slice(0),
- table: {}
- };
-
- if (req.columns.length > 0) {
- var column = req.columns.pop ();
- this.subtree (req.rowOid + column, maxRepetitions,
- tableColumnsFeedCb.bind (me, req),
- tableColumnsResponseCb.bind (me, req));
- }
-
- return this;
-};
-
-function tableResponseCb (req, error) {
- if (error)
- req.responseCb (error);
- else if (req.error)
- req.responseCb (req.error);
- else
- req.responseCb (null, req.table);
+ var me = this;
+
+ var oid = arguments[0];
+ var columns = arguments[1];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 4) {
+ responseCb = arguments[2];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[2];
+ responseCb = arguments[3];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ columns: columns.slice(0),
+ table: {}
+ };
+
+ if (req.columns.length > 0) {
+ var column = req.columns.pop();
+ this.subtree(req.rowOid + column, maxRepetitions,
+ tableColumnsFeedCb.bind(me, req),
+ tableColumnsResponseCb.bind(me, req));
+ }
+
+ return this;
+};
+
+function tableResponseCb(req, error) {
+ if (error)
+ req.responseCb(error);
+ else if (req.error)
+ req.responseCb(req.error);
+ else
+ req.responseCb(null, req.table);
}
-function tableFeedCb (req, varbinds) {
- for (var i = 0; i < varbinds.length; i++) {
- if (isVarbindError (varbinds[i])) {
- req.error = new RequestFailedError (varbindError (varbind[i]));
- return true;
- }
-
- var oid = varbinds[i].oid.replace (req.rowOid, "");
- if (oid && oid != varbinds[i].oid) {
- var match = oid.match (/^(\d+)\.(.+)$/);
- if (match && match[1] > 0) {
- if (! req.table[match[2]])
- req.table[match[2]] = {};
- req.table[match[2]][match[1]] = varbinds[i].value;
- }
- }
- }
+function tableFeedCb(req, varbinds) {
+ for (var i = 0; i < varbinds.length; i++) {
+ if (isVarbindError(varbinds[i])) {
+ req.error = new RequestFailedError(varbindError(varbinds[i]));
+ return true;
+ }
+
+ var oid = varbinds[i].oid.replace(req.rowOid, "");
+ if (oid && oid != varbinds[i].oid) {
+ var match = oid.match(/^(\d+)\.(.+)$/);
+ if (match && match[1] > 0) {
+ if (!req.table[match[2]])
+ req.table[match[2]] = {};
+ req.table[match[2]][match[1]] = varbinds[i].value;
+ }
+ }
+ }
}
Session.prototype.table = function () {
- var me = this;
+ var me = this;
+
+ var oid = arguments[0];
+ var maxRepetitions, responseCb;
+
+ if (arguments.length < 3) {
+ responseCb = arguments[1];
+ maxRepetitions = 20;
+ } else {
+ maxRepetitions = arguments[1];
+ responseCb = arguments[2];
+ }
+
+ var req = {
+ responseCb: responseCb,
+ maxRepetitions: maxRepetitions,
+ baseOid: oid,
+ rowOid: oid + ".1.",
+ table: {}
+ };
+
+ this.subtree(oid, maxRepetitions, tableFeedCb.bind(me, req),
+ tableResponseCb.bind(me, req));
+
+ return this;
+};
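+
+// Usage sketch for table(): the result is keyed by row index, then by column
+// number; e.g. for the interfaces table:
+//
+//   session.table("1.3.6.1.2.1.2.2", 20, function (error, table) {
+//       if (error)
+//           console.error(error);
+//       else
+//           for (var index in table)
+//               console.log("row " + index, table[index]);
+//   });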
- var oid = arguments[0];
- var maxRepetitions, responseCb;
+Session.prototype.trap = function () {
+ var req = {};
+
+ try {
+ var typeOrOid = arguments[0];
+ var varbinds, options = {}, responseCb;
+ var message;
+
+ /**
+ ** Support the following signatures:
+ **
+ ** typeOrOid, varbinds, options, callback
+ ** typeOrOid, varbinds, agentAddr, callback
+ ** typeOrOid, varbinds, callback
+ ** typeOrOid, agentAddr, callback
+ ** typeOrOid, options, callback
+ ** typeOrOid, callback
+ **/
+ if (arguments.length >= 4) {
+ varbinds = arguments[1];
+ if (typeof arguments[2] == "string") {
+ options.agentAddr = arguments[2];
+ } else if (arguments[2].constructor != Array) {
+ options = arguments[2];
+ }
+ responseCb = arguments[3];
+ } else if (arguments.length >= 3) {
+ if (typeof arguments[1] == "string") {
+ varbinds = [];
+ options.agentAddr = arguments[1];
+ } else if (arguments[1].constructor != Array) {
+ varbinds = [];
+ options = arguments[1];
+ } else {
+ varbinds = arguments[1];
+ }
+ responseCb = arguments[2];
+ } else {
+ varbinds = [];
+ responseCb = arguments[1];
+ }
+
+ var pdu, pduVarbinds = [];
+
+ for (var i = 0; i < varbinds.length; i++) {
+ var varbind = {
+ oid: varbinds[i].oid,
+ type: varbinds[i].type,
+ value: varbinds[i].value
+ };
+ pduVarbinds.push(varbind);
+ }
+
+ var id = _generateId(this.idBitsSize);
+
+ if (this.version == Version2c || this.version == Version3) {
+ if (typeof typeOrOid != "string")
+ typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
+
+ pduVarbinds.unshift(
+ {
+ oid: "1.3.6.1.2.1.1.3.0",
+ type: ObjectType.TimeTicks,
+ value: options.upTime || Math.floor(process.uptime() * 100)
+ },
+ {
+ oid: "1.3.6.1.6.3.1.1.4.1.0",
+ type: ObjectType.OID,
+ value: typeOrOid
+ }
+ );
+
+ pdu = TrapV2Pdu.createFromVariables(id, pduVarbinds, options);
+ } else {
+ pdu = TrapPdu.createFromVariables(typeOrOid, pduVarbinds, options);
+ }
+
+ if (this.version == Version3) {
+ var msgSecurityParameters = {
+ msgAuthoritativeEngineID: this.user.engineID,
+ msgAuthoritativeEngineBoots: 0,
+ msgAuthoritativeEngineTime: 0
+ };
+ message = Message.createRequestV3(this.user, msgSecurityParameters, pdu);
+ } else {
+ message = Message.createCommunity(this.version, this.community, pdu);
+ }
+
+ req = {
+ id: id,
+ message: message,
+ responseCb: responseCb,
+ port: this.trapPort
+ };
+
+ this.send(req, true);
+ } catch (error) {
+ if (req.responseCb)
+ req.responseCb(error);
+ }
+
+ return this;
+};
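+
+// Usage sketch for trap(): for v2c/v3 a TrapV2 PDU is built with sysUpTime
+// and snmpTrapOID prepended; traps are fire-and-forget, so the callback only
+// reports send errors:
+//
+//   session.trap("1.3.6.1.6.3.1.1.5.3", [], function (error) {
+//       if (error)
+//           console.error(error);
+//   });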
- if (arguments.length < 3) {
- responseCb = arguments[1];
- maxRepetitions = 20;
- } else {
- maxRepetitions = arguments[1];
- responseCb = arguments[2];
- }
+Session.prototype.unregisterRequest = function (id) {
+ var req = this.reqs[id];
+ if (req) {
+ delete this.reqs[id];
+ clearTimeout(req.timer);
+ delete req.timer;
+ this.reqCount--;
+ if (this.reqCount <= 0)
+ this.dgram.unref();
+ return req;
+ } else {
+ return null;
+ }
+};
- var req = {
- responseCb: responseCb,
- maxRepetitions: maxRepetitions,
- baseOid: oid,
- rowOid: oid + ".1.",
- table: {}
- };
+function walkCb(req, error, varbinds) {
+ var done = 0;
+ var oid;
+
+ if (error) {
+ if (error instanceof RequestFailedError) {
+ if (error.status != ErrorStatus.NoSuchName) {
+ req.doneCb(error);
+ return;
+ } else {
+ // signal the version 1 walk code below that it should stop
+ done = 1;
+ }
+ } else {
+ req.doneCb(error);
+ return;
+ }
+ }
+
+ if (this.version == Version2c || this.version == Version3) {
+ for (var i = varbinds[0].length; i > 0; i--) {
+ if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) {
+ varbinds[0].pop();
+ done = 1;
+ }
+ }
+ if (req.feedCb(varbinds[0]))
+ done = 1;
+ if (!done)
+ oid = varbinds[0][varbinds[0].length - 1].oid;
+ } else {
+ if (!done) {
+ if (req.feedCb(varbinds)) {
+ done = 1;
+ } else {
+ oid = varbinds[0].oid;
+ }
+ }
+ }
+
+ if (done)
+ req.doneCb(null);
+ else
+ this.walk(oid, req.maxRepetitions, req.feedCb, req.doneCb,
+ req.baseOid);
+}
- this.subtree (oid, maxRepetitions, tableFeedCb.bind (me, req),
- tableResponseCb.bind (me, req));
+Session.prototype.walk = function () {
+ var me = this;
+ var oid = arguments[0];
+ var maxRepetitions, feedCb, doneCb, baseOid;
+
+ if (arguments.length < 4) {
+ maxRepetitions = 20;
+ feedCb = arguments[1];
+ doneCb = arguments[2];
+ } else {
+ maxRepetitions = arguments[1];
+ feedCb = arguments[2];
+ doneCb = arguments[3];
+ }
+
+ var req = {
+ maxRepetitions: maxRepetitions,
+ feedCb: feedCb,
+ doneCb: doneCb
+ };
+
+ if (this.version == Version2c || this.version == Version3)
+ this.getBulk([oid], 0, maxRepetitions,
+ walkCb.bind(me, req));
+ else
+ this.getNext([oid], walkCb.bind(me, req));
+
+ return this;
+};
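+
+// Usage sketch for walk(): getBulk() is used for v2c/v3 and getNext() for
+// v1; returning a truthy value from feedCb stops the walk early:
+//
+//   session.walk("1.3.6.1.2.1", 20, function (varbinds) {
+//       varbinds.forEach(function (vb) {
+//           console.log(vb.oid + " = " + vb.value);
+//       });
+//   }, function (error) {
+//       if (error)
+//           console.error(error);
+//   });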
- return this;
+Session.prototype.sendV3Req = function (pdu, feedCb, responseCb, options, port) {
+ var message = Message.createRequestV3(this.user, this.msgSecurityParameters, pdu);
+ var reqOptions = options || {};
+ var req = new Req(this, message, feedCb, responseCb, reqOptions);
+ req.port = port;
+ this.send(req);
};
-Session.prototype.trap = function () {
- var req = {};
-
- try {
- var typeOrOid = arguments[0];
- var varbinds, options = {}, responseCb;
-
- /**
- ** Support the following signatures:
- **
- ** typeOrOid, varbinds, options, callback
- ** typeOrOid, varbinds, agentAddr, callback
- ** typeOrOid, varbinds, callback
- ** typeOrOid, agentAddr, callback
- ** typeOrOid, options, callback
- ** typeOrOid, callback
- **/
- if (arguments.length >= 4) {
- varbinds = arguments[1];
- if (typeof arguments[2] == "string") {
- options.agentAddr = arguments[2];
- } else if (arguments[2].constructor != Array) {
- options = arguments[2];
- }
- responseCb = arguments[3];
- } else if (arguments.length >= 3) {
- if (typeof arguments[1] == "string") {
- varbinds = [];
- options.agentAddr = arguments[1];
- } else if (arguments[1].constructor != Array) {
- varbinds = [];
- options = arguments[1];
- } else {
- varbinds = arguments[1];
- agentAddr = null;
- }
- responseCb = arguments[2];
- } else {
- varbinds = [];
- responseCb = arguments[1];
- }
-
- var pdu, pduVarbinds = [];
-
- for (var i = 0; i < varbinds.length; i++) {
- var varbind = {
- oid: varbinds[i].oid,
- type: varbinds[i].type,
- value: varbinds[i].value
- };
- pduVarbinds.push (varbind);
- }
-
- var id = _generateId (this.idBitsSize);
-
- if (this.version == Version2c) {
- if (typeof typeOrOid != "string")
- typeOrOid = "1.3.6.1.6.3.1.1.5." + (typeOrOid + 1);
-
- pduVarbinds.unshift (
- {
- oid: "1.3.6.1.2.1.1.3.0",
- type: ObjectType.TimeTicks,
- value: options.upTime || Math.floor (process.uptime () * 100)
- },
- {
- oid: "1.3.6.1.6.3.1.1.4.1.0",
- type: ObjectType.OID,
- value: typeOrOid
- }
- );
-
- pdu = new TrapV2Pdu (id, pduVarbinds, options);
- } else {
- pdu = new TrapPdu (typeOrOid, pduVarbinds, options);
- }
-
- var message = new RequestMessage (this.version, this.community, pdu);
-
- req = {
- id: id,
- message: message,
- responseCb: responseCb,
- port: this.trapPort
- };
-
- this.send (req, true);
- } catch (error) {
- if (req.responseCb)
- req.responseCb (error);
- }
-
- return this;
+var Engine = function (engineID, engineBoots, engineTime) {
+ if (engineID) {
+ this.engineID = Buffer.from(engineID, 'hex');
+ } else {
+ this.generateEngineID();
+ }
+    this.engineBoots = engineBoots || 0;
+    this.engineTime = engineTime || 10;
};
-Session.prototype.unregisterRequest = function (id) {
- var req = this.reqs[id];
- if (req) {
- delete this.reqs[id];
- clearTimeout (req.timer);
- delete req.timer;
- this.reqCount--;
- if (this.reqCount <= 0)
- this.dgram.unref();
- return req;
- } else {
- return null;
- }
-};
-
-function walkCb (req, error, varbinds) {
- var done = 0;
- var oid;
-
- if (error) {
- if (error instanceof RequestFailedError) {
- if (error.status != ErrorStatus.NoSuchName) {
- req.doneCb (error);
- return;
- } else {
- // signal the version 1 walk code below that it should stop
- done = 1;
- }
- } else {
- req.doneCb (error);
- return;
- }
- }
-
- if (this.version == Version2c) {
- for (var i = varbinds[0].length; i > 0; i--) {
- if (varbinds[0][i - 1].type == ObjectType.EndOfMibView) {
- varbinds[0].pop ();
- done = 1;
- }
- }
- if (req.feedCb (varbinds[0]))
- done = 1;
- if (! done)
- oid = varbinds[0][varbinds[0].length - 1].oid;
- } else {
- if (! done) {
- if (req.feedCb (varbinds)) {
- done = 1;
- } else {
- oid = varbinds[0].oid;
- }
- }
- }
-
- if (done)
- req.doneCb (null);
- else
- this.walk (oid, req.maxRepetitions, req.feedCb, req.doneCb,
- req.baseOid);
+Engine.prototype.generateEngineID = function () {
+ // generate a 17-byte engine ID in the following format:
+ // 0x80 + 0x00B983 (enterprise OID) | 0x80 (enterprise-specific format) | 12 bytes of random
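+    // e.g. 80 00 b9 83 80 9f 1c 2a 47 63 0b d8 e4 51 7a 90 3c
+    //      (first five bytes fixed, last twelve random; illustrative value only)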
+    this.engineID = Buffer.alloc(17);
+    this.engineID.fill('8000B98380', 0, 5, 'hex');
+    this.engineID.fill(crypto.randomBytes(12), 5, 17);
+};
+
+var Listener = function (options, receiver) {
+ this.receiver = receiver;
+ this.callback = receiver.onMsg;
+ this.family = options.transport || 'udp4';
+ this.port = options.port || 161;
+ this.disableAuthorization = options.disableAuthorization || false;
+};
+
+Listener.prototype.startListening = function () {
+ var me = this;
+ this.dgram = dgram.createSocket(this.family);
+ this.dgram.bind(this.port);
+ this.dgram.on("message", me.callback.bind(me.receiver));
+};
+
+Listener.prototype.send = function (message, rinfo) {
+ var me = this;
+
+ var buffer = message.toBuffer();
+
+ this.dgram.send(buffer, 0, buffer.length, rinfo.port, rinfo.address,
+ function (error, bytes) {
+ if (error) {
+ // me.callback (error);
+ console.error("Error sending: " + error.message);
+ } else {
+ // debug ("Listener sent response message");
+ }
+ });
+};
+
+Listener.formatCallbackData = function (pdu, rinfo) {
+ if (pdu.contextEngineID) {
+ pdu.contextEngineID = pdu.contextEngineID.toString('hex');
+ }
+ delete pdu.nonRepeaters;
+ delete pdu.maxRepetitions;
+ return {
+ pdu: pdu,
+ rinfo: rinfo
+ };
+};
+
+Listener.processIncoming = function (buffer, authorizer, callback) {
+ var message = Message.createFromBuffer(buffer);
+ var community;
+
+ // Authorization
+ if (message.version == Version3) {
+ message.user = authorizer.users.filter(localUser => localUser.name ==
+ message.msgSecurityParameters.msgUserName)[0];
+ message.disableAuthentication = authorizer.disableAuthorization;
+ if (!message.user) {
+ if (message.msgSecurityParameters.msgUserName != "" && !authorizer.disableAuthorization) {
+ callback(new RequestFailedError("Local user not found for message with user " +
+ message.msgSecurityParameters.msgUserName));
+ return;
+ } else if (message.hasAuthentication()) {
+ callback(new RequestFailedError("Local user not found and message requires authentication with user " +
+ message.msgSecurityParameters.msgUserName));
+ return;
+ } else {
+ message.user = {
+ name: "",
+ level: SecurityLevel.noAuthNoPriv
+ };
+ }
+ }
+ if (!message.processIncomingSecurity(message.user, callback)) {
+ return;
+ }
+ } else {
+ community = authorizer.communities.filter(localCommunity => localCommunity == message.community)[0];
+ if (!community && !authorizer.disableAuthorization) {
+ callback(new RequestFailedError("Local community not found for message with community " + message.community));
+ return;
+ }
+ }
+
+ return message;
+};
+
+var Authorizer = function () {
+ this.communities = [];
+ this.users = [];
+};
+
+Authorizer.prototype.addCommunity = function (community) {
+ if (this.getCommunity(community)) {
+ return;
+ } else {
+ this.communities.push(community);
+ }
+};
+
+Authorizer.prototype.getCommunity = function (community) {
+ return this.communities.filter(localCommunity => localCommunity == community)[0] || null;
+};
+
+Authorizer.prototype.getCommunities = function () {
+ return this.communities;
+};
+
+Authorizer.prototype.deleteCommunity = function (community) {
+ var index = this.communities.indexOf(community);
+ if (index > -1) {
+ this.communities.splice(index, 1);
+ }
+};
+
+Authorizer.prototype.addUser = function (user) {
+ if (this.getUser(user.name)) {
+ this.deleteUser(user.name);
+ }
+ this.users.push(user);
+};
+
+Authorizer.prototype.getUser = function (userName) {
+ return this.users.filter(localUser => localUser.name == userName)[0] || null;
+};
+
+Authorizer.prototype.getUsers = function () {
+ return this.users;
+};
+
+Authorizer.prototype.deleteUser = function (userName) {
+ var index = this.users.findIndex(localUser => localUser.name == userName);
+ if (index > -1) {
+ this.users.splice(index, 1);
+ }
+};
+
+
+/*****************************************************************************
+ ** Receiver class definition
+ **/
+
+var Receiver = function (options, callback) {
+    DEBUG = options.debug;
+    this.authorizer = new Authorizer();
+    this.engine = new Engine(options.engineID);
+
+    this.callback = callback;
+    this.family = options.transport || 'udp4';
+    this.port = options.port || 162;
+    options.port = this.port;
+    this.disableAuthorization = options.disableAuthorization || false;
+    this.context = (options && options.context) ? options.context : "";
+    this.listener = new Listener(options, this);
+};
+
+Receiver.prototype.addCommunity = function (community) {
+ this.authorizer.addCommunity(community);
+};
+
+Receiver.prototype.getCommunity = function (community) {
+ return this.authorizer.getCommunity(community);
+};
+
+Receiver.prototype.getCommunities = function () {
+ return this.authorizer.getCommunities();
+};
+
+Receiver.prototype.deleteCommunity = function (community) {
+    this.authorizer.deleteCommunity(community);
+};
+
+Receiver.prototype.addUser = function (user) {
+ this.authorizer.addUser(user);
+};
+
+Receiver.prototype.getUser = function (userName) {
+ return this.authorizer.getUser(userName);
+};
+
+Receiver.prototype.getUsers = function () {
+ return this.authorizer.getUsers();
+};
+
+Receiver.prototype.deleteUser = function (userName) {
+ this.authorizer.deleteUser(userName);
+};
+
+Receiver.prototype.onMsg = function (buffer, rinfo) {
+ var message = Listener.processIncoming(buffer, this.authorizer, this.callback);
+ var reportMessage;
+
+ if (!message) {
+ return;
+ }
+
+ // The only GetRequest PDUs supported are those used for SNMPv3 discovery
+ if (message.pdu.type == PduType.GetRequest) {
+ if (message.version != Version3) {
+ this.callback(new RequestInvalidError("Only SNMPv3 discovery GetRequests are supported"));
+ return;
+ } else if (message.hasAuthentication()) {
+ this.callback(new RequestInvalidError("Only discovery (noAuthNoPriv) GetRequests are supported but this message has authentication"));
+ return;
+ } else if (!message.isReportable()) {
+ this.callback(new RequestInvalidError("Only discovery GetRequests are supported and this message does not have the reportable flag set"));
+ return;
+ }
+        reportMessage = message.createReportResponseMessage(this.engine, this.context);
+ this.listener.send(reportMessage, rinfo);
+ return;
+ }
+
+ // Inform/trap processing
+ debug(JSON.stringify(message.pdu, null, 2));
+ if (message.pdu.type == PduType.Trap || message.pdu.type == PduType.TrapV2) {
+ this.callback(null, this.formatCallbackData(message.pdu, rinfo));
+ } else if (message.pdu.type == PduType.InformRequest) {
+ message.pdu.type = PduType.GetResponse;
+ message.buffer = null;
+ message.setReportable(false);
+ this.listener.send(message, rinfo);
+ message.pdu.type = PduType.InformRequest;
+ this.callback(null, this.formatCallbackData(message.pdu, rinfo));
+ } else {
+ this.callback(new RequestInvalidError("Unexpected PDU type " + message.pdu.type + " (" + PduType[message.pdu.type] + ")"));
+ }
};
-Session.prototype.walk = function () {
- var me = this;
- var oid = arguments[0];
- var maxRepetitions, feedCb, doneCb, baseOid;
+Receiver.prototype.formatCallbackData = function (pdu, rinfo) {
+ if (pdu.contextEngineID) {
+ pdu.contextEngineID = pdu.contextEngineID.toString('hex');
+ }
+ delete pdu.nonRepeaters;
+ delete pdu.maxRepetitions;
+ return {
+ pdu: pdu,
+ rinfo: rinfo
+ };
+};
+
+Receiver.prototype.close = function () {
+ this.listener.close();
+};
+
+Receiver.create = function (options, callback) {
+ var receiver = new Receiver(options, callback);
+ receiver.listener.startListening();
+ return receiver;
+};
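+
+// Illustrative usage sketch (not part of the library; port and community are
+// placeholders). A receiver listens for traps and informs and passes each
+// decoded notification, formatted as { pdu, rinfo }, to the callback:
+//
+//     var receiver = snmp.createReceiver({ port: 162 }, function (error, notification) {
+//         if (error)
+//             console.error(error.toString());
+//         else
+//             console.log(JSON.stringify(notification.pdu, null, 2));
+//     });
+//     receiver.addCommunity("public");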
+
+var MibNode = function (address, parent) {
+ this.address = address;
+ this.oid = this.address.join('.');
+ this.parent = parent;
+ this.children = {};
+};
+
+MibNode.prototype.child = function (index) {
+ return this.children[index];
+};
+
+MibNode.prototype.listChildren = function (lowest) {
+ var sorted = [];
+
+ lowest = lowest || 0;
+
+    Object.keys(this.children).forEach(function (key) {
+        var i = Number(key);
+        if (i >= lowest)
+            sorted.push(i);
+    });
+
+ sorted.sort(function (a, b) {
+ return (a - b);
+ });
+
+ return sorted;
+};
+
+MibNode.prototype.isDescendant = function (address) {
+ return MibNode.oidIsDescended(this.address, address);
+};
+
+MibNode.prototype.isAncestor = function (address) {
+ return MibNode.oidIsDescended(address, this.address);
+};
+
+MibNode.prototype.getAncestorProvider = function () {
+ if (this.provider) {
+ return this;
+ } else if (!this.parent) {
+ return null;
+ } else {
+ return this.parent.getAncestorProvider();
+ }
+};
+
+MibNode.prototype.getInstanceNodeForTableRow = function () {
+ var childCount = Object.keys(this.children).length;
+ if (childCount == 0) {
+ if (this.value) {
+ return this;
+ } else {
+ return null;
+ }
+    } else if (childCount == 1) {
+        return this.children[Object.keys(this.children)[0]].getInstanceNodeForTableRow();
+ } else if (childCount > 1) {
+ return null;
+ }
+};
+
+MibNode.prototype.getInstanceNodeForTableRowIndex = function (index) {
+ var childCount = Object.keys(this.children).length;
+ if (childCount == 0) {
+ if (this.value) {
+ return this;
+ } else {
+ // not found
+ return null;
+ }
+ } else {
+ if (index.length == 0) {
+ return this.getInstanceNodeForTableRow();
+ } else {
+ var nextChildIndexPart = index[0];
+ if (!nextChildIndexPart) {
+ return null;
+ }
+            var remainingIndex = index.slice(1);
+ return this.children[nextChildIndexPart].getInstanceNodeForTableRowIndex(remainingIndex);
+ }
+ }
+};
+
+MibNode.prototype.getNextInstanceNode = function () {
+    var node = this;
+    var siblingIndex, childrenAddresses, siblingPosition;
+
+    if (this.value) {
+        // Need upwards traversal first
+        while (node) {
+            siblingIndex = node.address.slice(-1)[0];
+ node = node.parent;
+ if (!node) {
+ // end of MIB
+ return null;
+ } else {
+ childrenAddresses = Object.keys(node.children).sort((a, b) => a - b);
+ siblingPosition = childrenAddresses.indexOf(siblingIndex.toString());
+ if (siblingPosition + 1 < childrenAddresses.length) {
+ node = node.children[childrenAddresses[siblingPosition + 1]];
+ break;
+ }
+ }
+ }
+ }
+ // Descent
+ while (node) {
+ if (node.value) {
+ return node;
+ }
+ childrenAddresses = Object.keys(node.children).sort((a, b) => a - b);
+ node = node.children[childrenAddresses[0]];
+ if (!node) {
+ // unexpected
+ return null;
+ }
+ }
+};
+
+MibNode.prototype.delete = function () {
+    if (Object.keys(this.children).length > 0) {
+        throw new Error("Cannot delete non-leaf MIB node");
+    }
+    var addressLastPart = this.address.slice(-1)[0];
+ delete this.parent.children[addressLastPart];
+ this.parent = null;
+};
+
+MibNode.prototype.pruneUpwards = function () {
+    if (!this.parent) {
+        return;
+    }
+    if (Object.keys(this.children).length == 0) {
+        var lastAddressPart = this.address.slice(-1)[0].toString();
+        delete this.parent.children[lastAddressPart];
+        this.parent.pruneUpwards();
+        this.parent = null;
+    }
+};
+
+MibNode.prototype.dump = function (options) {
+ var valueString;
+ if ((!options.leavesOnly || options.showProviders) && this.provider) {
+ console.log(this.oid + " [" + MibProviderType[this.provider.type] + ": " + this.provider.name + "]");
+ } else if ((!options.leavesOnly) || Object.keys(this.children).length == 0) {
+ if (this.value) {
+ valueString = " = ";
+ valueString += options.showTypes ? ObjectType[this.valueType] + ": " : "";
+ valueString += options.showValues ? this.value : "";
+ } else {
+ valueString = "";
+ }
+ console.log(this.oid + valueString);
+ }
+    for (var node of Object.keys(this.children).sort((a, b) => a - b)) {
+ this.children[node].dump(options);
+ }
+};
+
+MibNode.oidIsDescended = function (oid, ancestor) {
+ var ancestorAddress = Mib.convertOidToAddress(ancestor);
+ var address = Mib.convertOidToAddress(oid);
+ var isAncestor = true;
+
+ if (address.length <= ancestorAddress.length) {
+ return false;
+ }
- if (arguments.length < 4) {
- maxRepetitions = 20;
- feedCb = arguments[1];
- doneCb = arguments[2];
- } else {
- maxRepetitions = arguments[1];
- feedCb = arguments[2];
- doneCb = arguments[3];
- }
+ ancestorAddress.forEach(function (o, i) {
+ if (address[i] !== ancestorAddress[i]) {
+ isAncestor = false;
+ }
+ });
- var req = {
- maxRepetitions: maxRepetitions,
- feedCb: feedCb,
- doneCb: doneCb
- };
+ return isAncestor;
+};
+
+var Mib = function () {
+ this.root = new MibNode([], null);
+ this.providers = {};
+ this.providerNodes = {};
+};
+
+Mib.prototype.addNodesForOid = function (oidString) {
+ var address = Mib.convertOidToAddress(oidString);
+ return this.addNodesForAddress(address);
+};
+
+Mib.prototype.addNodesForAddress = function (address) {
+    var node;
+    var i;
+
+ node = this.root;
+
+ for (i = 0; i < address.length; i++) {
+ if (!node.children.hasOwnProperty(address[i])) {
+ node.children[address[i]] = new MibNode(address.slice(0, i + 1), node);
+ }
+ node = node.children[address[i]];
+ }
+
+ return node;
+};
+
+Mib.prototype.lookup = function (oid) {
+ var address;
+ var i;
+ var node;
+
+ address = Mib.convertOidToAddress(oid);
+ node = this.root;
+ for (i = 0; i < address.length; i++) {
+ if (!node.children.hasOwnProperty(address[i])) {
+            return null;
+ }
+ node = node.children[address[i]];
+ }
+
+ return node;
+};
+
+Mib.prototype.getProviderNodeForInstance = function (instanceNode) {
+ if (instanceNode.provider) {
+        throw new ReferenceError("Instance node has a provider, which should never happen");
+ }
+ return instanceNode.getAncestorProvider();
+};
+
+Mib.prototype.addProviderToNode = function (provider) {
+ var node = this.addNodesForOid(provider.oid);
+
+ node.provider = provider;
+ if (provider.type == MibProviderType.Table) {
+ if (!provider.index) {
+ provider.index = [1];
+ }
+ }
+ this.providerNodes[provider.name] = node;
+ return node;
+};
+
+Mib.prototype.registerProvider = function (provider) {
+ this.providers[provider.name] = provider;
+};
+
+Mib.prototype.unregisterProvider = function (name) {
+ var providerNode = this.providerNodes[name];
+ if (providerNode) {
+        var providerNodeParent = providerNode.parent;
+ providerNode.delete();
+ providerNodeParent.pruneUpwards();
+ delete this.providerNodes[name];
+ }
+ delete this.providers[name];
+};
+
+Mib.prototype.getProvider = function (name) {
+ return this.providers[name];
+};
+
+Mib.prototype.getProviders = function () {
+ return this.providers;
+};
+
+Mib.prototype.getScalarValue = function (scalarName) {
+ var providerNode = this.providerNodes[scalarName];
+ if (!providerNode || !providerNode.provider || providerNode.provider.type != MibProviderType.Scalar) {
+ throw new ReferenceError("Failed to get node for registered MIB provider " + scalarName);
+ }
+    var instanceAddress = providerNode.address.concat([0]);
+    var instanceNode = this.lookup(instanceAddress);
+    if (!instanceNode) {
+        throw new Error("Failed to find instance node for registered MIB provider " + scalarName);
+    }
+ return instanceNode.value;
+};
+
+Mib.prototype.setScalarValue = function (scalarName, newValue) {
+ var providerNode;
+ var instanceNode;
+
+ if (!this.providers[scalarName]) {
+ throw new ReferenceError("Provider " + scalarName + " not registered with this MIB");
+ }
+
+ providerNode = this.providerNodes[scalarName];
+ if (!providerNode) {
+ providerNode = this.addProviderToNode(this.providers[scalarName]);
+ }
+ if (!providerNode || !providerNode.provider || providerNode.provider.type != MibProviderType.Scalar) {
+ throw new ReferenceError("Could not find MIB node for registered provider " + scalarName);
+ }
+ var instanceAddress = providerNode.address.concat([0]);
+ instanceNode = this.lookup(instanceAddress);
+ if (!instanceNode) {
+ this.addNodesForAddress(instanceAddress);
+ instanceNode = this.lookup(instanceAddress);
+ instanceNode.valueType = providerNode.provider.scalarType;
+ }
+ instanceNode.value = newValue;
+};
+
+Mib.prototype.getProviderNodeForTable = function (table) {
+ var providerNode;
+ var provider;
+
+ providerNode = this.providerNodes[table];
+ if (!providerNode) {
+ throw new ReferenceError("No MIB provider registered for " + table);
+ }
+ provider = providerNode.provider;
+    if (!provider) {
+ throw new ReferenceError("No MIB provider definition for registered provider " + table);
+ }
+ if (provider.type != MibProviderType.Table) {
+ throw new TypeError("Registered MIB provider " + table +
+ " is not of the correct type (is type " + MibProviderType[provider.type] + ")");
+ }
+ return providerNode;
+};
+
+Mib.prototype.addTableRow = function (table, row) {
+ var providerNode;
+ var provider;
+ var instance = [];
+ var instanceAddress;
+ var instanceNode;
+
+ if (this.providers[table] && !this.providerNodes[table]) {
+ this.addProviderToNode(this.providers[table]);
+ }
+ providerNode = this.getProviderNodeForTable(table);
+ provider = providerNode.provider;
+ for (var indexPart of provider.index) {
+        var columnPosition = provider.columns.findIndex(column => column.number == indexPart);
+ instance.push(row[columnPosition]);
+ }
+ for (var i = 0; i < providerNode.provider.columns.length; i++) {
+ var column = providerNode.provider.columns[i];
+ instanceAddress = providerNode.address.concat(column.number).concat(instance);
+ this.addNodesForAddress(instanceAddress);
+ instanceNode = this.lookup(instanceAddress);
+ instanceNode.valueType = column.type;
+ instanceNode.value = row[i];
+ }
+};
+
+Mib.prototype.getTableColumnDefinitions = function (table) {
+ var providerNode;
+ var provider;
+
+ providerNode = this.getProviderNodeForTable(table);
+ provider = providerNode.provider;
+ return provider.columns;
+};
+
+Mib.prototype.getTableColumnCells = function (table, columnNumber) {
+    var providerNode = this.getProviderNodeForTable(table);
+    var columnNode = providerNode.children[columnNumber];
+    var column = [];
+    for (var row of Object.keys(columnNode.children)) {
+        var instanceNode = columnNode.children[row].getInstanceNodeForTableRow();
+        column.push(instanceNode.value);
+    }
+    return column;
+};
+
+Mib.prototype.getTableRowCells = function (table, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+ var row = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ row.push(instanceNode.value);
+ }
+ return row;
+};
+
+Mib.prototype.getTableCells = function (table, byRows) {
+ var providerNode;
+ var columnNode;
+ var data = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+        var column = [];
+ data.push(column);
+ for (var row of Object.keys(columnNode.children)) {
+ instanceNode = columnNode.children[row].getInstanceNodeForTableRow();
+ column.push(instanceNode.value);
+ }
+ }
+
+ if (byRows) {
+ return Object.keys(data[0]).map(function (c) {
+ return data.map(function (r) {
+ return r[c];
+ });
+ });
+ } else {
+ return data;
+ }
+
+};
+
+Mib.prototype.getTableSingleCell = function (table, columnNumber, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+
+ providerNode = this.getProviderNodeForTable(table);
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ return instanceNode.value;
+};
+
+Mib.prototype.setTableSingleCell = function (table, columnNumber, rowIndex, value) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+
+ providerNode = this.getProviderNodeForTable(table);
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ instanceNode.value = value;
+};
+
+Mib.prototype.deleteTableRow = function (table, rowIndex) {
+ var providerNode;
+ var columnNode;
+ var instanceNode;
+ var row = [];
+
+ providerNode = this.getProviderNodeForTable(table);
+ for (var columnNumber of Object.keys(providerNode.children)) {
+ columnNode = providerNode.children[columnNumber];
+ instanceNode = columnNode.getInstanceNodeForTableRowIndex(rowIndex);
+ if (instanceNode) {
+            var instanceParentNode = instanceNode.parent;
+ instanceNode.delete();
+ instanceParentNode.pruneUpwards();
+ } else {
+ throw new ReferenceError("Cannot find row for index " + rowIndex + " at registered provider " + table);
+ }
+ }
+ return row;
+};
+
+Mib.prototype.dump = function (options) {
+ if (!options) {
+ options = {};
+ }
+    var completedOptions = {
+        leavesOnly: options.leavesOnly === undefined ? true : options.leavesOnly,
+        showProviders: options.showProviders === undefined ? true : options.showProviders,
+        showValues: options.showValues === undefined ? true : options.showValues,
+        showTypes: options.showTypes === undefined ? true : options.showTypes
+    };
+ this.root.dump(completedOptions);
+};
+
+Mib.convertOidToAddress = function (oid) {
+ var address;
+ var oidArray;
+ var i;
+
+ if (typeof (oid) === 'object' && util.isArray(oid)) {
+ address = oid;
+ } else if (typeof (oid) === 'string') {
+ address = oid.split('.');
+ } else {
+ throw new TypeError('oid (string or array) is required');
+ }
+
+ if (address.length < 3)
+ throw new RangeError('object identifier is too short');
+
+ oidArray = [];
+ for (i = 0; i < address.length; i++) {
+ var n;
+
+ if (address[i] === '')
+ continue;
+
+ if (address[i] === true || address[i] === false) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is malformed');
+ }
+
+ n = Number(address[i]);
+
+ if (isNaN(n)) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is malformed');
+ }
+ if (n % 1 !== 0) {
+ throw new TypeError('object identifier component ' +
+ address[i] + ' is not an integer');
+ }
+ if (i === 0 && n > 2) {
+ throw new RangeError('object identifier does not ' +
+ 'begin with 0, 1, or 2');
+ }
+ if (i === 1 && n > 39) {
+ throw new RangeError('object identifier second ' +
+ 'component ' + n + ' exceeds encoding limit of 39');
+ }
+ if (n < 0) {
+ throw new RangeError('object identifier component ' +
+ address[i] + ' is negative');
+ }
+ if (n > MAX_INT32) {
+ throw new RangeError('object identifier component ' +
+ address[i] + ' is too large');
+ }
+ oidArray.push(n);
+ }
+
+ return oidArray;
+
+};
+
+var MibRequest = function (requestDefinition) {
+ this.operation = requestDefinition.operation;
+ this.address = Mib.convertOidToAddress(requestDefinition.oid);
+ this.oid = this.address.join('.');
+ this.providerNode = requestDefinition.providerNode;
+ this.instanceNode = requestDefinition.instanceNode;
+};
+
+MibRequest.prototype.isScalar = function () {
+ return this.providerNode && this.providerNode.provider &&
+ this.providerNode.provider.type == MibProviderType.Scalar;
+};
- if (this.version == Version2c)
- this.getBulk ([oid], 0, maxRepetitions,
- walkCb.bind (me, req));
- else
- this.getNext ([oid], walkCb.bind (me, req));
+MibRequest.prototype.isTabular = function () {
+ return this.providerNode && this.providerNode.provider &&
+ this.providerNode.provider.type == MibProviderType.Table;
+};
+
+var Agent = function (options, callback) {
+ DEBUG = options.debug;
+ this.listener = new Listener(options, this);
+ this.engine = new Engine(options.engineID);
+ this.authorizer = new Authorizer();
+ this.mib = new Mib();
+ this.callback = callback || function () {
+ };
+ this.context = "";
+};
+
+Agent.prototype.getMib = function () {
+ return this.mib;
+};
+
+Agent.prototype.getAuthorizer = function () {
+ return this.authorizer;
+};
+
+Agent.prototype.registerProvider = function (provider) {
+ this.mib.registerProvider(provider);
+};
- return this;
+Agent.prototype.unregisterProvider = function (provider) {
+ this.mib.unregisterProvider(provider);
+};
+
+Agent.prototype.getProvider = function (provider) {
+ return this.mib.getProvider(provider);
+};
+
+Agent.prototype.getProviders = function () {
+ return this.mib.getProviders();
+};
+
+Agent.prototype.onMsg = function (buffer, rinfo) {
+ var message = Listener.processIncoming(buffer, this.authorizer, this.callback);
+ var reportMessage;
+ var responseMessage;
+
+ if (!message) {
+ return;
+ }
+
+ // SNMPv3 discovery
+ if (message.version == Version3 && message.pdu.type == PduType.GetRequest &&
+ !message.hasAuthoritativeEngineID() && message.isReportable()) {
+ reportMessage = message.createReportResponseMessage(this.engine, this.context);
+ this.listener.send(reportMessage, rinfo);
+ return;
+ }
+
+ // Request processing
+ debug(JSON.stringify(message.pdu, null, 2));
+ if (message.pdu.type == PduType.GetRequest) {
+ responseMessage = this.request(message, rinfo);
+ } else if (message.pdu.type == PduType.SetRequest) {
+ responseMessage = this.request(message, rinfo);
+ } else if (message.pdu.type == PduType.GetNextRequest) {
+ responseMessage = this.getNextRequest(message, rinfo);
+ } else if (message.pdu.type == PduType.GetBulkRequest) {
+ responseMessage = this.getBulkRequest(message, rinfo);
+ } else {
+ this.callback(new RequestInvalidError("Unexpected PDU type " +
+ message.pdu.type + " (" + PduType[message.pdu.type] + ")"));
+ }
+
+};
+
+Agent.prototype.request = function (requestMessage, rinfo) {
+ var me = this;
+ var varbindsCompleted = 0;
+ var requestPdu = requestMessage.pdu;
+ var varbindsLength = requestPdu.varbinds.length;
+ var responsePdu = requestPdu.getResponsePduForRequest();
+
+    // block-scoped bindings so that asynchronous handlers see the varbind
+    // belonging to their own iteration
+    for (let i = 0; i < requestPdu.varbinds.length; i++) {
+        let requestVarbind = requestPdu.varbinds[i];
+        let instanceNode = this.mib.lookup(requestVarbind.oid);
+        let providerNode;
+        let mibRequest;
+        let handler;
+        let responseVarbindType;
+        let responseVarbind;
+
+ if (!instanceNode) {
+ mibRequest = new MibRequest({
+ operation: requestPdu.type,
+ oid: requestVarbind.oid
+ });
+ handler = function getNsoHandler(mibRequestForNso) {
+ mibRequestForNso.done({
+ errorStatus: ErrorStatus.NoSuchName,
+ errorIndex: i
+ });
+ };
+ } else {
+ providerNode = this.mib.getProviderNodeForInstance(instanceNode);
+ mibRequest = new MibRequest({
+ operation: requestPdu.type,
+ providerNode: providerNode,
+ instanceNode: instanceNode,
+ oid: requestVarbind.oid
+ });
+ handler = providerNode.provider.handler;
+ }
+
+ mibRequest.done = function (error) {
+ if (error) {
+ responsePdu.errorStatus = error.errorStatus;
+ responsePdu.errorIndex = error.errorIndex;
+ responseVarbind = {
+ oid: mibRequest.oid,
+ type: ObjectType.Null,
+ value: null
+ };
+ } else {
+ if (requestPdu.type == PduType.SetRequest) {
+ mibRequest.instanceNode.value = requestVarbind.value;
+ }
+ if (requestPdu.type == PduType.GetNextRequest && requestVarbind.type == ObjectType.EndOfMibView) {
+ responseVarbindType = ObjectType.EndOfMibView;
+ } else {
+ responseVarbindType = mibRequest.instanceNode.valueType;
+ }
+ responseVarbind = {
+ oid: mibRequest.oid,
+ type: responseVarbindType,
+ value: mibRequest.instanceNode.value
+ };
+ }
+ me.setSingleVarbind(responsePdu, i, responseVarbind);
+ if (++varbindsCompleted == varbindsLength) {
+ me.sendResponse.call(me, rinfo, requestMessage, responsePdu);
+ }
+ };
+ if (handler) {
+ handler(mibRequest);
+ } else {
+ mibRequest.done();
+ }
+ }
+};
+
+Agent.prototype.addGetNextVarbind = function (targetVarbinds, startOid) {
+ var startNode = this.mib.lookup(startOid);
+ var getNextNode;
+
+ if (!startNode) {
+ // Off-tree start specified
+ targetVarbinds.push({
+            oid: startOid,
+ type: ObjectType.Null,
+ value: null
+ });
+ } else {
+ getNextNode = startNode.getNextInstanceNode();
+ if (!getNextNode) {
+ // End of MIB
+ targetVarbinds.push({
+                oid: startOid,
+ type: ObjectType.EndOfMibView,
+ value: null
+ });
+ } else {
+ // Normal response
+ targetVarbinds.push({
+ oid: getNextNode.oid,
+ type: getNextNode.valueType,
+ value: getNextNode.value
+ });
+ }
+ }
+ return getNextNode;
+};
+
+Agent.prototype.getNextRequest = function (requestMessage, rinfo) {
+ var requestPdu = requestMessage.pdu;
+ var varbindsLength = requestPdu.varbinds.length;
+ var getNextVarbinds = [];
+
+ for (var i = 0; i < varbindsLength; i++) {
+ this.addGetNextVarbind(getNextVarbinds, requestPdu.varbinds[i].oid);
+ }
+
+ requestMessage.pdu.varbinds = getNextVarbinds;
+ this.request(requestMessage, rinfo);
+};
+
+Agent.prototype.getBulkRequest = function (requestMessage, rinfo) {
+ var requestPdu = requestMessage.pdu;
+ var requestVarbinds = requestPdu.varbinds;
+ var getBulkVarbinds = [];
+ var startOid = [];
+ var getNextNode;
+
+ for (var n = 0; n < requestPdu.nonRepeaters; n++) {
+ this.addGetNextVarbind(getBulkVarbinds, requestVarbinds[n].oid);
+ }
+
+ for (var v = requestPdu.nonRepeaters; v < requestVarbinds.length; v++) {
+ startOid.push(requestVarbinds[v].oid);
+ }
+
+ for (var r = 0; r < requestPdu.maxRepetitions; r++) {
+ for (var v = requestPdu.nonRepeaters; v < requestVarbinds.length; v++) {
+ getNextNode = this.addGetNextVarbind(getBulkVarbinds, startOid[v - requestPdu.nonRepeaters]);
+ if (getNextNode) {
+ startOid[v - requestPdu.nonRepeaters] = getNextNode.oid;
+ }
+ }
+ }
+
+ requestMessage.pdu.varbinds = getBulkVarbinds;
+ this.request(requestMessage, rinfo);
+};
+
+Agent.prototype.setSingleVarbind = function (responsePdu, index, responseVarbind) {
+ responsePdu.varbinds[index] = responseVarbind;
+};
+
+Agent.prototype.sendResponse = function (rinfo, requestMessage, responsePdu) {
+ var responseMessage = requestMessage.createResponseForRequest(responsePdu);
+ this.listener.send(responseMessage, rinfo);
+ this.callback(null, Listener.formatCallbackData(responseMessage.pdu, rinfo));
+};
+
+Agent.create = function (options, callback) {
+ var agent = new Agent(options, callback);
+ agent.listener.startListening();
+ return agent;
};
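+
+// Illustrative usage sketch (not part of the library; OID, name and value are
+// placeholders). An agent answers Get/GetNext/GetBulk/Set requests from a MIB
+// built out of registered providers; a scalar's instance value is set through
+// the MIB:
+//
+//     var agent = snmp.createAgent({ port: 161 }, function (error, data) {
+//         if (error)
+//             console.error(error.toString());
+//     });
+//     agent.getAuthorizer().addCommunity("public");
+//     agent.registerProvider({
+//         name: "sysDescr",
+//         type: snmp.MibProviderType.Scalar,
+//         oid: "1.3.6.1.2.1.1.1",
+//         scalarType: snmp.ObjectType.OctetString
+//     });
+//     agent.getMib().setScalarValue("sysDescr", "example agent");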
/*****************************************************************************
@@ -1438,18 +3400,41 @@ Session.prototype.walk = function () {
exports.Session = Session;
exports.createSession = function (target, community, options) {
- return new Session (target, community, options);
+    if (options && options.version && !(options.version == Version1 || options.version == Version2c)) {
+        throw new ResponseInvalidError("SNMP community session requested but version '" + options.version + "' specified in options is not valid");
+    } else {
+        return new Session(target, community, options);
+    }
};
+exports.createV3Session = function (target, user, options) {
+    if (options && options.version && options.version != Version3) {
+        throw new ResponseInvalidError("SNMPv3 session requested but version '" + options.version + "' specified in options");
+    }
+    options = options || {};
+    options.version = Version3;
+    return new Session(target, user, options);
+};
+
+exports.createReceiver = Receiver.create;
+exports.createAgent = Agent.create;
+
exports.isVarbindError = isVarbindError;
exports.varbindError = varbindError;
exports.Version1 = Version1;
exports.Version2c = Version2c;
+exports.Version3 = Version3;
+exports.Version = Version;
exports.ErrorStatus = ErrorStatus;
exports.TrapType = TrapType;
exports.ObjectType = ObjectType;
+exports.PduType = PduType;
+exports.MibProviderType = MibProviderType;
+exports.SecurityLevel = SecurityLevel;
+exports.AuthProtocols = AuthProtocols;
+exports.PrivProtocols = PrivProtocols;
exports.ResponseInvalidError = ResponseInvalidError;
exports.RequestInvalidError = RequestInvalidError;
@@ -1460,6 +3445,8 @@ exports.RequestTimedOutError = RequestTimedOutError;
** We've added this for testing.
**/
exports.ObjectParser = {
- readInt: readInt,
- readUint: readUint
+ readInt: readInt,
+ readUint: readUint
};
+exports.Authentication = Authentication;
+exports.Encryption = Encryption;
diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md
index 296975626..ba7039d20 100644
--- a/collectors/node.d.plugin/sma_webbox/README.md
+++ b/collectors/node.d.plugin/sma_webbox/README.md
@@ -1,4 +1,10 @@
-# SMA Sunny Webbox
+<!--
+title: "SMA Sunny WebBox monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/sma_webbox/README.md
+sidebar_label: "SMA Sunny WebBox"
+-->
+
+# SMA Sunny WebBox monitoring with Netdata
[SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf)
diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md
index c661bac8c..93ade5e64 100644
--- a/collectors/node.d.plugin/snmp/README.md
+++ b/collectors/node.d.plugin/snmp/README.md
@@ -1,15 +1,26 @@
-# SNMP Data Collector
+<!--
+title: "SNMP device monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/snmp/README.md
+sidebar_label: "SNMP"
+-->
-Using this collector, Netdata can collect data from any SNMP device.
+# SNMP device monitoring with Netdata
-This collector supports:
+Collects data from any SNMP device, using the [net-snmp](https://github.com/markabrahams/node-net-snmp) module.
+It supports:
+
+- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3
- any number of SNMP devices
- each SNMP device can be used to collect data for any number of charts
- each chart may have any number of dimensions
- each SNMP device may have a different update frequency
- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).
+## Requirements
+
+- `nodejs` version 4 or newer
+
## Configuration
You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following.
@@ -32,7 +43,9 @@ In this example:
"community": "public",
"update_every": 10,
"max_request_size": 50,
- "options": { "timeout": 10000 },
+ "options": {
+ "timeout": 10000
+ },
"charts": {
"snmp_switch.bandwidth_port1": {
"title": "Switch Bandwidth for port 1",
@@ -109,7 +122,9 @@ If you need to define many charts using incremental OIDs, you can use something
"hostname": "10.11.12.8",
"community": "public",
"update_every": 10,
- "options": { "timeout": 20000 },
+ "options": {
+ "timeout": 20000
+ },
"charts": {
"snmp_switch.bandwidth_port": {
"title": "Switch Bandwidth for port ",
@@ -117,7 +132,10 @@ If you need to define many charts using incremental OIDs, you can use something
"type": "area",
"priority": 1,
"family": "ports",
- "multiply_range": [ 1, 24 ],
+ "multiply_range": [
+ 1,
+ 24
+ ],
"dimensions": {
"in": {
"oid": "1.3.6.1.2.1.2.2.1.10.",
@@ -152,11 +170,55 @@ Each of the 24 new charts will have its id (1-24) appended at:
The `options` given for each server, are:
-- `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms.
-- `version`, the SNMP version to use. `0` is Version 1, `1` is Version 2c. The default is Version 1 (`0`).
-- `transport`, the default is `udp4`.
-- `port`, the port of the SNMP device to connect to. The default is `161`.
-- `retries`, the number of attempts to make to fetch the data. The default is `1`.
+- `port` - UDP port to send requests to. Defaults to `161`.
+- `retries` - number of times to re-send a request. Defaults to `1`.
+- `sourceAddress` - IP address from which SNMP requests should originate. There is no default; the operating system selects an appropriate source address when the SNMP request is sent.
+- `sourcePort` - UDP port from which SNMP requests should originate. Defaults to an ephemeral port selected by the operating system.
+- `timeout` - number of milliseconds to wait for a response before re-trying or failing. Defaults to `5000`.
+- `transport` - specify the transport to use, can be either `udp4` or `udp6`. Defaults to `udp4`.
+- `version` - either `0` (SNMPv1), `1` (SNMPv2c) or `3` (SNMPv3). Defaults to `0`. See the example below.
+- `idBitsSize` - either `16` or `32`. Defaults to `32`. Used to reduce the size of the generated id for compatibility with some older devices.
+
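+For example, a minimal sketch of a server entry using these options (hostname
+and values are placeholders):
+
+```json
+{
+  "hostname": "10.11.12.8",
+  "community": "public",
+  "options": {
+    "port": 161,
+    "transport": "udp4",
+    "timeout": 5000,
+    "retries": 1,
+    "version": 1
+  }
+}
+```
+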
+## SNMPv3
+
+To use SNMPv3:
+
+- set `version` to 3
+- use `user` instead of `community`
+
+User syntax:
+
+```json
+{
+ "user": {
+ "name": "userName",
+ "level": 3,
+ "authProtocol": "3",
+ "authKey": "authKey",
+ "privProtocol": "2",
+ "privKey": "privKey"
+ }
+}
+```
+
+Security levels:
+
+- 1 is `noAuthNoPriv`
+- 2 is `authNoPriv`
+- 3 is `authPriv`
+
+Authentication protocols:
+
+- "1" is `none`
+- "2" is `md5`
+- "3" is `sha`
+
+Privacy protocols:
+
+- "1" is `none`
+- "2" is `des`
+
+For additional details, please see the [net-snmp module readme](https://github.com/markabrahams/node-net-snmp#snmpcreatev3session-target-user-options).
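+
+As an illustrative sketch, a complete SNMPv3 server entry (all values are
+placeholders; charts are defined exactly as in the examples above):
+
+```json
+{
+  "hostname": "10.11.12.8",
+  "update_every": 10,
+  "options": {
+    "version": 3
+  },
+  "user": {
+    "name": "userName",
+    "level": 3,
+    "authProtocol": "3",
+    "authKey": "authKey",
+    "privProtocol": "2",
+    "privKey": "privKey"
+  },
+  "charts": {}
+}
+```
+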
## Retrieving names from snmp
@@ -218,145 +280,152 @@ This switch has a very slow SNMP processors. To respond, it needs about 8 second
"enable_autodetect": false,
"update_every": 5,
"servers": [
- {
- "hostname": "10.11.12.8",
- "community": "public",
- "update_every": 15,
- "options": { "timeout": 20000, "version": 1 },
- "charts": {
- "snmp_switch.power": {
- "title": "Switch Power Supply",
- "units": "watts",
- "type": "line",
- "priority": 10,
- "family": "power",
- "dimensions": {
- "supply": {
- "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1",
- "algorithm": "absolute",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- },
- "used": {
- "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1",
- "algorithm": "absolute",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- }
- }
- , "snmp_switch.input": {
- "title": "Switch Packets Input",
- "units": "packets/s",
- "type": "area",
- "priority": 20,
- "family": "IP",
- "dimensions": {
- "receives": {
- "oid": ".1.3.6.1.2.1.4.3.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- , "discards": {
- "oid": ".1.3.6.1.2.1.4.8.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- }
- }
- , "snmp_switch.input_errors": {
- "title": "Switch Received Packets with Errors",
- "units": "packets/s",
- "type": "line",
- "priority": 30,
- "family": "IP",
- "dimensions": {
- "bad_header": {
- "oid": ".1.3.6.1.2.1.4.4.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- , "bad_address": {
- "oid": ".1.3.6.1.2.1.4.5.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- , "unknown_protocol": {
- "oid": ".1.3.6.1.2.1.4.7.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
- }
- }
- }
- , "snmp_switch.output": {
- "title": "Switch Output Packets",
- "units": "packets/s",
- "type": "line",
- "priority": 40,
- "family": "IP",
- "dimensions": {
- "requests": {
- "oid": ".1.3.6.1.2.1.4.10.0",
- "algorithm": "incremental",
- "multiplier": 1,
- "divisor": 1,
- "offset": 0
+ {
+ "hostname": "10.11.12.8",
+ "community": "public",
+ "update_every": 15,
+ "options": {
+ "timeout": 20000,
+ "version": 1
+ },
+ "charts": {
+ "snmp_switch.power": {
+ "title": "Switch Power Supply",
+ "units": "watts",
+ "type": "line",
+ "priority": 10,
+ "family": "power",
+ "dimensions": {
+ "supply": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "used": {
+ "oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1",
+ "algorithm": "absolute",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
}
- , "discards": {
- "oid": ".1.3.6.1.2.1.4.11.0",
- "algorithm": "incremental",
- "multiplier": -1,
- "divisor": 1,
- "offset": 0
+ },
+ "snmp_switch.input": {
+ "title": "Switch Packets Input",
+ "units": "packets/s",
+ "type": "area",
+ "priority": 20,
+ "family": "IP",
+ "dimensions": {
+ "receives": {
+ "oid": ".1.3.6.1.2.1.4.3.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "discards": {
+ "oid": ".1.3.6.1.2.1.4.8.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
}
- , "no_route": {
- "oid": ".1.3.6.1.2.1.4.12.0",
- "algorithm": "incremental",
- "multiplier": -1,
- "divisor": 1,
- "offset": 0
+ },
+ "snmp_switch.input_errors": {
+ "title": "Switch Received Packets with Errors",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 30,
+ "family": "IP",
+ "dimensions": {
+ "bad_header": {
+ "oid": ".1.3.6.1.2.1.4.4.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "bad_address": {
+ "oid": ".1.3.6.1.2.1.4.5.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "unknown_protocol": {
+ "oid": ".1.3.6.1.2.1.4.7.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ }
}
- }
- }
- , "snmp_switch.bandwidth_port": {
- "title": "Switch Bandwidth for port ",
- "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
- "units": "kilobits/s",
- "type": "area",
- "priority": 100,
- "family": "ports",
- "multiply_range": [ 1, 24 ],
- "dimensions": {
- "in": {
- "oid": ".1.3.6.1.2.1.2.2.1.10.",
- "algorithm": "incremental",
- "multiplier": 8,
- "divisor": 1024,
- "offset": 0
+ },
+ "snmp_switch.output": {
+ "title": "Switch Output Packets",
+ "units": "packets/s",
+ "type": "line",
+ "priority": 40,
+ "family": "IP",
+ "dimensions": {
+ "requests": {
+ "oid": ".1.3.6.1.2.1.4.10.0",
+ "algorithm": "incremental",
+ "multiplier": 1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "discards": {
+ "oid": ".1.3.6.1.2.1.4.11.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ },
+ "no_route": {
+ "oid": ".1.3.6.1.2.1.4.12.0",
+ "algorithm": "incremental",
+ "multiplier": -1,
+ "divisor": 1,
+ "offset": 0
+ }
}
- , "out": {
- "oid": ".1.3.6.1.2.1.2.2.1.16.",
- "algorithm": "incremental",
- "multiplier": -8,
- "divisor": 1024,
- "offset": 0
+ },
+ "snmp_switch.bandwidth_port": {
+ "title": "Switch Bandwidth for port ",
+ "titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
+ "units": "kilobits/s",
+ "type": "area",
+ "priority": 100,
+ "family": "ports",
+ "multiply_range": [
+ 1,
+ 24
+ ],
+ "dimensions": {
+ "in": {
+ "oid": ".1.3.6.1.2.1.2.2.1.10.",
+ "algorithm": "incremental",
+ "multiplier": 8,
+ "divisor": 1024,
+ "offset": 0
+ },
+ "out": {
+ "oid": ".1.3.6.1.2.1.2.2.1.16.",
+ "algorithm": "incremental",
+ "multiplier": -8,
+ "divisor": 1024,
+ "offset": 0
+ }
}
}
}
}
- }],
+ ]
}
```
diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js
index 6b33ae0d5..ca3f0bfbc 100644
--- a/collectors/node.d.plugin/snmp/snmp.node.js
+++ b/collectors/node.d.plugin/snmp/snmp.node.js
@@ -117,53 +117,53 @@ var net_snmp = require('net-snmp');
var extend = require('extend');
var netdata = require('netdata');
-if(netdata.options.DEBUG === true) netdata.debug('loaded', __filename, ' plugin');
+if (netdata.options.DEBUG === true) netdata.debug('loaded', __filename, ' plugin');
netdata.processors.snmp = {
name: 'snmp',
- fixoid: function(oid) {
- if(typeof oid !== 'string')
+ fixoid: function (oid) {
+ if (typeof oid !== 'string')
return oid;
- if(oid.charAt(0) === '.')
+ if (oid.charAt(0) === '.')
return oid.substring(1, oid.length);
return oid;
},
- prepare: function(service) {
+ prepare: function (service) {
var __DEBUG = netdata.options.DEBUG;
- if(typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) {
+ if (typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) {
// this is the first time we see this service
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs');
// build an index of all OIDs
service.snmp_oids_index = {};
var chart_keys = Object.keys(service.request.charts);
var chart_keys_len = chart_keys.length;
- while(chart_keys_len--) {
+ while (chart_keys_len--) {
var c = chart_keys[chart_keys_len];
var chart = service.request.charts[c];
// for each chart
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c);
- if(typeof chart.titleoid !== 'undefined') {
- service.snmp_oids_index[this.fixoid(chart.titleoid)] = {
- type: 'title',
- link: chart
- };
- }
+ if (typeof chart.titleoid !== 'undefined') {
+ service.snmp_oids_index[this.fixoid(chart.titleoid)] = {
+ type: 'title',
+ link: chart
+ };
+ }
var dim_keys = Object.keys(chart.dimensions);
var dim_keys_len = dim_keys.length;
- while(dim_keys_len--) {
+ while (dim_keys_len--) {
var d = dim_keys[dim_keys_len];
var dim = chart.dimensions[d];
@@ -172,7 +172,7 @@ netdata.processors.snmp = {
var oid = this.fixoid(dim.oid);
var oidname = this.fixoid(dim.oidname);
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname);
// link it to the point we need to set the value to
@@ -181,7 +181,7 @@ netdata.processors.snmp = {
link: dim
};
- if(typeof oidname !== 'undefined')
+ if (typeof oidname !== 'undefined')
service.snmp_oids_index[oidname] = {
type: 'name',
link: dim
@@ -192,18 +192,17 @@ netdata.processors.snmp = {
}
}
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index));
// now create the array of OIDs needed by net-snmp
service.snmp_oids = Object.keys(service.snmp_oids_index);
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids));
service.snmp_oids_cleaned = 0;
- }
- else if(service.snmp_oids_cleaned === 0) {
+ } else if (service.snmp_oids_cleaned === 0) {
service.snmp_oids_cleaned = 1;
// the second time, keep only values
@@ -211,87 +210,82 @@ netdata.processors.snmp = {
service.snmp_oids = new Array();
var oid_keys = Object.keys(service.snmp_oids_index);
var oid_keys_len = oid_keys.length;
- while(oid_keys_len--) {
+ while (oid_keys_len--) {
if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value')
service.snmp_oids.push(oid_keys[oid_keys_len]);
}
}
},
- getdata: function(service, index, ok, failed, callback) {
+ getdata: function (service, index, ok, failed, callback) {
var __DEBUG = netdata.options.DEBUG;
var that = this;
- if(index >= service.snmp_oids.length) {
- callback((ok > 0)?{ ok: ok, failed: failed }:null);
+ if (index >= service.snmp_oids.length) {
+ callback((ok > 0) ? {ok: ok, failed: failed} : null);
return;
}
var slice;
- if(service.snmp_oids.length <= service.request.max_request_size) {
+ if (service.snmp_oids.length <= service.request.max_request_size) {
slice = service.snmp_oids;
index = service.snmp_oids.length;
- }
- else if(service.snmp_oids.length - index <= service.request.max_request_size) {
+ } else if (service.snmp_oids.length - index <= service.request.max_request_size) {
slice = service.snmp_oids.slice(index, service.snmp_oids.length);
index = service.snmp_oids.length;
- }
- else {
+ } else {
slice = service.snmp_oids.slice(index, index + service.request.max_request_size);
index += service.request.max_request_size;
}
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size);
- service.snmp_session.get(slice, function(error, varbinds) {
- if(error) {
+ service.snmp_session.get(slice, function (error, varbinds) {
+ if (error) {
service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds));
// make all values null
var len = slice.length;
- while(len--)
+ while (len--)
service.snmp_oids_index[slice[len]].value = null;
- }
- else {
- if(__DEBUG === true)
+ } else {
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds));
var varbinds_len = varbinds.length;
- for(var i = 0; i < varbinds_len ; i++) {
+ for (var i = 0; i < varbinds_len; i++) {
var value = null;
- if(net_snmp.isVarbindError(varbinds[i])) {
- if(__DEBUG === true)
+ if (net_snmp.isVarbindError(varbinds[i])) {
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
value = null;
failed++;
- }
- else {
+ } else {
                        // test from Counter64
// varbinds[i].type = net_snmp.ObjectType.Counter64;
// varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
- switch(varbinds[i].type) {
+ switch (varbinds[i].type) {
case net_snmp.ObjectType.OctetString:
if (service.snmp_oids_index[varbinds[i].oid].type !== 'title' && service.snmp_oids_index[varbinds[i].oid].type !== 'name') {
// parse floating point values, exposed as strings
value = parseFloat(varbinds[i].value) * 1000;
- if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
- }
- else {
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
+ } else {
// just use the string
value = varbinds[i].value;
- if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
}
break;
case net_snmp.ObjectType.Counter64:
// copy the buffer
value = '0x' + varbinds[i].value.toString('hex');
- if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
break;
case net_snmp.ObjectType.Integer:
@@ -299,55 +293,73 @@ netdata.processors.snmp = {
case net_snmp.ObjectType.Gauge:
default:
value = varbinds[i].value;
- if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof(varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
+ if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
break;
}
ok++;
}
- if(value !== null) {
- switch(service.snmp_oids_index[varbinds[i].oid].type) {
- case 'title': service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value; break;
- case 'name' : service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_'); break;
- case 'value': service.snmp_oids_index[varbinds[i].oid].link.value = value; break;
+ if (value !== null) {
+ switch (service.snmp_oids_index[varbinds[i].oid].type) {
+ case 'title':
+ service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value;
+ break;
+ case 'name' :
+ service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_');
+ break;
+ case 'value':
+ service.snmp_oids_index[varbinds[i].oid].link.value = value;
+ break;
}
}
}
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values');
}
that.getdata(service, index, ok, failed, callback);
});
},
- process: function(service, callback) {
+ process: function (service, callback) {
var __DEBUG = netdata.options.DEBUG;
this.prepare(service);
- if(service.snmp_oids.length === 0) {
+ if (service.snmp_oids.length === 0) {
// no OIDs found for this service
- if(__DEBUG === true)
+ if (__DEBUG === true)
service.error('no OIDs to process.');
callback(null);
return;
}
- if(typeof service.snmp_session === 'undefined' || service.snmp_session === null) {
+ if (typeof service.snmp_session === 'undefined' || service.snmp_session === null) {
// no SNMP session has been created for this service
// the SNMP session is just the initialization of NET-SNMP
- if(__DEBUG === true)
- netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options));
+ var snmp_version = (service.request.options && service.request.options.version)
+ ? service.request.options.version
+ : net_snmp.Version1;
- // create the SNMP session
- service.snmp_session = net_snmp.createSession (service.request.hostname, service.request.community, service.request.options);
+ if (snmp_version === net_snmp.Version3) {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' user ' + service.request.user + ' options ' + netdata.stringify(service.request.options));
- if(__DEBUG === true)
+ // create the SNMP session
+ service.snmp_session = net_snmp.createV3Session(service.request.hostname, service.request.user, service.request.options);
+ } else {
+ if (__DEBUG === true)
+ netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options));
+
+ // create the SNMP session
+ service.snmp_session = net_snmp.createSession(service.request.hostname, service.request.community, service.request.options);
+ }
+
+ if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session));
// if we later need traps, this is how to do it:
@@ -369,18 +381,18 @@ var snmp = {
charts: {},
- processResponse: function(service, data) {
- if(data !== null) {
- if(service.added !== true)
+ processResponse: function (service, data) {
+ if (data !== null) {
+ if (service.added !== true)
service.commit();
var chart_keys = Object.keys(service.request.charts);
var chart_keys_len = chart_keys.length;
- for(var i = 0; i < chart_keys_len; i++) {
+ for (var i = 0; i < chart_keys_len; i++) {
var c = chart_keys[i];
var chart = snmp.charts[c];
- if(typeof chart === 'undefined') {
+ if (typeof chart === 'undefined') {
chart = service.chart(c, service.request.charts[c]);
snmp.charts[c] = chart;
}
@@ -390,11 +402,11 @@ var snmp = {
var dimensions = service.request.charts[c].dimensions;
var dim_keys = Object.keys(dimensions);
var dim_keys_len = dim_keys.length;
- for(var j = 0; j < dim_keys_len ; j++) {
+ for (var j = 0; j < dim_keys_len; j++) {
var d = dim_keys[j];
if (dimensions[d].value !== null) {
- if(typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number')
+ if (typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number')
service.set(d, dimensions[d].value + dimensions[d].offset);
else
service.set(d, dimensions[d].value);
@@ -410,10 +422,10 @@ var snmp = {
// this function is called only from this module
// its purpose is to prepare the request and call
// netdata.serviceExecute()
- serviceExecute: function(conf) {
+ serviceExecute: function (conf) {
var __DEBUG = netdata.options.DEBUG;
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every);
var service = netdata.service({
@@ -427,41 +439,41 @@ var snmp = {
// multiply the charts, if required
var chart_keys = Object.keys(service.request.charts);
var chart_keys_len = chart_keys.length;
- for( var i = 0; i < chart_keys_len ; i++ ) {
+ for (var i = 0; i < chart_keys_len; i++) {
var c = chart_keys[i];
var service_request_chart = service.request.charts[c];
- if(__DEBUG === true)
+ if (__DEBUG === true)
netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c);
- if(typeof service_request_chart.update_every === 'undefined')
+ if (typeof service_request_chart.update_every === 'undefined')
service_request_chart.update_every = service.update_every;
- if(typeof service_request_chart.multiply_range !== 'undefined') {
+ if (typeof service_request_chart.multiply_range !== 'undefined') {
var from = service_request_chart.multiply_range[0];
var to = service_request_chart.multiply_range[1];
var prio = service_request_chart.priority || 1;
- if(prio < snmp.base_priority) prio += snmp.base_priority;
+ if (prio < snmp.base_priority) prio += snmp.base_priority;
- while(from <= to) {
+ while (from <= to) {
var id = c + from.toString();
var chart = extend(true, {}, service_request_chart);
chart.title += from.toString();
- if(typeof chart.titleoid !== 'undefined')
+ if (typeof chart.titleoid !== 'undefined')
chart.titleoid += from.toString();
chart.priority = prio++;
var dim_keys = Object.keys(chart.dimensions);
var dim_keys_len = dim_keys.length;
- for(var j = 0; j < dim_keys_len ; j++) {
+ for (var j = 0; j < dim_keys_len; j++) {
var d = dim_keys[j];
chart.dimensions[d].oid += from.toString();
- if(typeof chart.dimensions[d].oidname !== 'undefined')
+ if (typeof chart.dimensions[d].oidname !== 'undefined')
chart.dimensions[d].oidname += from.toString();
}
service.request.charts[id] = chart;
@@ -469,9 +481,8 @@ var snmp = {
}
delete service.request.charts[c];
- }
- else {
- if(service.request.charts[c].priority < snmp.base_priority)
+ } else {
+ if (service.request.charts[c].priority < snmp.base_priority)
service.request.charts[c].priority += snmp.base_priority;
}
}
@@ -479,19 +490,19 @@ var snmp = {
service.execute(this.processResponse);
},
- configure: function(config) {
+ configure: function (config) {
var added = 0;
- if(typeof config.max_request_size === 'undefined')
+ if (typeof config.max_request_size === 'undefined')
config.max_request_size = 50;
- if(typeof(config.servers) !== 'undefined') {
+ if (typeof (config.servers) !== 'undefined') {
var len = config.servers.length;
- while(len--) {
- if(typeof config.servers[len].update_every === 'undefined')
+ while (len--) {
+ if (typeof config.servers[len].update_every === 'undefined')
config.servers[len].update_every = this.update_every;
- if(typeof config.servers[len].max_request_size === 'undefined')
+ if (typeof config.servers[len].max_request_size === 'undefined')
config.servers[len].max_request_size = config.max_request_size;
this.serviceExecute(config.servers[len]);
@@ -505,8 +516,8 @@ var snmp = {
// module.update()
// this is called repeatedly to collect data, by calling
// service.execute()
- update: function(service, callback) {
- service.execute(function(serv, data) {
+ update: function (service, callback) {
+ service.execute(function (serv, data) {
service.module.processResponse(serv, data);
callback();
});
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
index 80adc86b9..59bbf703c 100644
--- a/collectors/node.d.plugin/stiebeleltron/README.md
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -1,6 +1,12 @@
-# stiebel eltron
+<!--
+title: "Stiebel Eltron ISG monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/stiebeleltron/README.md
+sidebar_label: "Stiebel Eltron ISG"
+-->
-This module collects metrics from the configured heat pump and hot water installation from Stiebel Eltron ISG web.
+# Stiebel Eltron ISG monitoring with Netdata
+
+Collects metrics from the configured heat pump and hot water installation via the Stiebel Eltron ISG web interface.
**Requirements**
@@ -34,7 +40,7 @@ The charts are configurable, however, the provided default configuration collect
- Heat circuit 1 room temperature in C (set/actual)
- Heat circuit 2 room temperature in C (set/actual)
-5. **Eletric Reheating**
+5. **Electric Reheating**
- Dual Mode Reheating temperature in C (hot water/heating)
@@ -62,7 +68,7 @@ If no configuration is given, the module will be disabled. Each `update_every` i
Original author: BrainDoctor (github)
-The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
+The module supports any metrics that are parsable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
### Testing
diff --git a/collectors/perf.plugin/Makefile.in b/collectors/perf.plugin/Makefile.in
deleted file mode 100644
index f08212f64..000000000
--- a/collectors/perf.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/perf.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/perf.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/perf.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md
index 96405a84d..ccd185ced 100644
--- a/collectors/perf.plugin/README.md
+++ b/collectors/perf.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "perf.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/perf.plugin/README.md
+-->
+
# perf.plugin
`perf.plugin` collects system-wide CPU performance statistics from Performance Monitoring Units (PMU) using
@@ -59,7 +64,7 @@ enable the perf plugin, edit /etc/netdata/netdata.conf and set:
You can use the `command options` parameter to pick what data should be collected and which charts should be
displayed. If `all` is used, all general performance monitoring counters are probed and corresponding charts
are enabled for the available counters. You can also define a particular set of enabled charts using the
-following keywords: `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alighnment`,
+following keywords: `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`,
`emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`.
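As a hedged illustration of the paragraph above (the section name follows Netdata's `[plugin:NAME]` convention; verify the exact keys in your `netdata.conf`), restricting collection to a few counter groups might look like:

```
[plugin:perf]
	# probe only these general performance counter groups
	command options = cycles instructions cache
```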
## Debugging
diff --git a/collectors/perf.plugin/perf_plugin.c b/collectors/perf.plugin/perf_plugin.c
index c645c2798..9fe3c5e07 100644
--- a/collectors/perf.plugin/perf_plugin.c
+++ b/collectors/perf.plugin/perf_plugin.c
@@ -422,7 +422,7 @@ static int perf_collect() {
}
}
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: successfully read event id = %u, value = %lu\n", current_event->id, current_event->value);
+ if(unlikely(debug)) fprintf(stderr, "perf.plugin: successfully read event id = %u, value = %"PRIu64"\n", current_event->id, current_event->value);
}
if(unlikely(perf_events[EV_ID_CPU_CYCLES].value == prev_cpu_cycles_value))
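The format-string change above is a portability fix: the event value is a 64-bit counter, and a hard-coded `%lu` only matches `uint64_t` on LP64 platforms. The `PRIu64` macro from `<inttypes.h>` always expands to the right conversion specifier. A minimal standalone sketch of the idiom:

```c
/* Portable printing of fixed-width integers: PRIu64 expands to the
   conversion specifier that matches uint64_t on the current platform,
   so the same format string works on 32-bit and 64-bit builds. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t value = UINT64_MAX;
    printf("value = %" PRIu64 "\n", value); /* instead of %lu or %llu */
    return 0;
}
```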
diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in
deleted file mode 100644
index 856ec294e..000000000
--- a/collectors/plugins.d/Makefile.in
+++ /dev/null
@@ -1,702 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/plugins.d
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
- ctags-recursive dvi-recursive html-recursive info-recursive \
- install-data-recursive install-dvi-recursive \
- install-exec-recursive install-html-recursive \
- install-info-recursive install-pdf-recursive \
- install-ps-recursive install-recursive installcheck-recursive \
- installdirs-recursive pdf-recursive ps-recursive \
- tags-recursive uninstall-recursive
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
- distclean-recursive maintainer-clean-recursive
-am__recursive_targets = \
- $(RECURSIVE_TARGETS) \
- $(RECURSIVE_CLEAN_TARGETS) \
- $(am__extra_recursive_targets)
-AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
- distdir
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-# Read a list of newline-separated strings from the standard input,
-# and print each of them once, without duplicates. Input order is
-# *not* preserved.
-am__uniquify_input = $(AWK) '\
- BEGIN { nonempty = 0; } \
- { items[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in items) print i; }; } \
-'
-# Make sure the list of sources is unique. This is necessary because,
-# e.g., the same source file might be shared among _SOURCES variables
-# for different programs/libraries.
-am__define_uniq_tagged_files = \
- list='$(am__tagged_files)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | $(am__uniquify_input)`
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-am__relativize = \
- dir0=`pwd`; \
- sed_first='s,^\([^/]*\)/.*$$,\1,'; \
- sed_rest='s,^[^/]*/*,,'; \
- sed_last='s,^.*/\([^/]*\)$$,\1,'; \
- sed_butlast='s,/*[^/]*$$,,'; \
- while test -n "$$dir1"; do \
- first=`echo "$$dir1" | sed -e "$$sed_first"`; \
- if test "$$first" != "."; then \
- if test "$$first" = ".."; then \
- dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
- dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
- else \
- first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
- if test "$$first2" = "$$first"; then \
- dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
- else \
- dir2="../$$dir2"; \
- fi; \
- dir0="$$dir0"/"$$first"; \
- fi; \
- fi; \
- dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
- done; \
- reldir="$$dir2"
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-SUBDIRS = \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/plugins.d/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run 'make' without going through this Makefile.
-# To change the values of 'make' variables: instead of editing Makefiles,
-# (1) if the variable is set in 'config.status', edit 'config.status'
-# (which will cause the Makefiles to be regenerated when you run 'make');
-# (2) otherwise, pass the desired values on the 'make' command line.
-$(am__recursive_targets):
- @fail=; \
- if $(am__make_keepgoing); then \
- failcom='fail=yes'; \
- else \
- failcom='exit 1'; \
- fi; \
- dot_seen=no; \
- target=`echo $@ | sed s/-recursive//`; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- for subdir in $$list; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- dot_seen=yes; \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done; \
- if test "$$dot_seen" = "no"; then \
- $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
- fi; test -z "$$fail"
-
-ID: $(am__tagged_files)
- $(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-recursive
-TAGS: tags
-
-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- set x; \
- here=`pwd`; \
- if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
- include_option=--etags-include; \
- empty_fix=.; \
- else \
- include_option=--include; \
- empty_fix=; \
- fi; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- test ! -f $$subdir/TAGS || \
- set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
- fi; \
- done; \
- $(am__define_uniq_tagged_files); \
- shift; \
- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- if test $$# -gt 0; then \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- "$$@" $$unique; \
- else \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$unique; \
- fi; \
- fi
-ctags: ctags-recursive
-
-CTAGS: ctags
-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
- $(am__define_uniq_tagged_files); \
- test -z "$(CTAGS_ARGS)$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && $(am__cd) $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) "$$here"
-cscopelist: cscopelist-recursive
-
-cscopelist-am: $(am__tagged_files)
- list='$(am__tagged_files)'; \
- case "$(srcdir)" in \
- [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
- *) sdir=$(subdir)/$(srcdir) ;; \
- esac; \
- for i in $$list; do \
- if test -f "$$i"; then \
- echo "$(subdir)/$$i"; \
- else \
- echo "$$sdir/$$i"; \
- fi; \
- done >> $(top_builddir)/cscope.files
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
- $(am__make_dryrun) \
- || test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
- $(am__relativize); \
- new_distdir=$$reldir; \
- dir1=$$subdir; dir2="$(top_distdir)"; \
- $(am__relativize); \
- new_top_distdir=$$reldir; \
- echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
- echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
- ($(am__cd) $$subdir && \
- $(MAKE) $(AM_MAKEFLAGS) \
- top_distdir="$$new_top_distdir" \
- distdir="$$new_distdir" \
- am__remove_distdir=: \
- am__skip_length_check=: \
- am__skip_mode_fix=: \
- distdir) \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-recursive
-all-am: Makefile $(DATA)
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-recursive
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-recursive
- -rm -f Makefile
-distclean-am: clean-am distclean-generic distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-html-am:
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-recursive
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-recursive
-
-install-html-am:
-
-install-info: install-info-recursive
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-recursive
-
-install-pdf-am:
-
-install-ps: install-ps-recursive
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: $(am__recursive_targets) install-am install-strip
-
-.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
- check-am clean clean-generic cscopelist-am ctags ctags-am \
- distclean distclean-generic distclean-tags distdir dvi dvi-am \
- html html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs installdirs-am maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
index 289b8c1a0..c166e11e3 100644
--- a/collectors/plugins.d/README.md
+++ b/collectors/plugins.d/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "External plugins overview"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/plugins.d/README.md
+-->
+
# External plugins overview
`plugins.d` is the Netdata internal plugin that collects metrics
@@ -7,18 +12,18 @@ from external processes, thus allowing Netdata to use **external plugins**.
|plugin|language|O/S|description|
|:----:|:------:|:-:|:----------|
-|[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
-|[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
-|[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS**|
-|[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
-|[ioping.plugin](../ioping.plugin/)|`C`|all|measures disk latency.|
-|[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.|
-|[nfacct.plugin](../nfacct.plugin/)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
-|[xenstat.plugin](../xenstat.plugin/)|`C`|linux|collects XenServer and XCP-ng metrics using `lxenstat`.|
-|[perf.plugin](../perf.plugin/)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).|
-|[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.|
-|[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
-|[slabinfo.plugin](../slabinfo.plugin/)|`C`|linux|collects kernel internal cache objects (SLAB) metrics.|
+|[apps.plugin](/collectors/apps.plugin/README.md)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.|
+|[charts.d.plugin](/collectors/charts.d.plugin/README.md)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.|
+|[cups.plugin](/collectors/cups.plugin/README.md)|`C`|all|monitors **CUPS**|
+|[fping.plugin](/collectors/fping.plugin/README.md)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.|
+|[ioping.plugin](/collectors/ioping.plugin/README.md)|`C`|all|measures disk latency.|
+|[freeipmi.plugin](/collectors/freeipmi.plugin/README.md)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.|
+|[nfacct.plugin](/collectors/nfacct.plugin/README.md)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
+|[xenstat.plugin](/collectors/xenstat.plugin/README.md)|`C`|linux|collects XenServer and XCP-ng metrics using `lxenstat`.|
+|[perf.plugin](/collectors/perf.plugin/README.md)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).|
+|[node.d.plugin](/collectors/node.d.plugin/README.md)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.|
+|[python.d.plugin](/collectors/python.d.plugin/README.md)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
+|[slabinfo.plugin](/collectors/slabinfo.plugin/README.md)|`C`|linux|collects kernel internal cache objects (SLAB) metrics.|
Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom-made modules to be included. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
@@ -57,7 +62,7 @@ Plugins can create any number of charts with any number of dimensions each. Each
Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration files) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata-supplied ones) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
-The `netdata.conf` section [plugins] section contains a list of all the plugins found at the system where Netdata runs, with a boolean setting to enable them or not.
+The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable or disable each.
Example:
@@ -74,10 +79,10 @@ Example:
```
The setting `enable running new plugins` sets the default behavior for all external plugins. It can be
-overriden for distinct plugins by modifying the appropriate plugin value configuration to either `yes` or `now`.
+overridden for distinct plugins by modifying the appropriate plugin value configuration to either `yes` or `no`.
-The setting `check for new plugins every` sets the interval between scans of the directory `/usr/libexec/netdata/plugins.d`.
-New plugins can be added anytime and netdata will detect them in a timely manner.
+The setting `check for new plugins every` sets the interval between scans of the directory
+`/usr/libexec/netdata/plugins.d`. New plugins can be added any time, and Netdata will detect them in a timely manner.
For each of the external plugins enabled, another `netdata.conf` section
is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
@@ -378,15 +383,18 @@ or do not output the line at all.
## Modular Plugins
-1. **python**, use `python.d.plugin`, there are many examples in the [python.d directory](../python.d.plugin/)
+1. **python**, use `python.d.plugin`, there are many examples in the [python.d
+ directory](/collectors/python.d.plugin/README.md)
   python is ideal for Netdata plugins. It is a simple yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient option.
-2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d directory](../node.d.plugin/)
+2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d
+ directory](/collectors/node.d.plugin/README.md)
   node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc., node.js is probably the best choice before moving to compiled code. Keep in mind, though, that node.js is not memory efficient; it will probably need more RAM compared to python.
-3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d directory](../charts.d.plugin/)
+3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
+ directory](/collectors/charts.d.plugin/README.md)
   BASH is the simplest scripting language for collecting values. It is the least efficient, though, in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might consume a lot of system resources.
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
index 32c584032..42889fa8c 100644
--- a/collectors/plugins.d/plugins_d.c
+++ b/collectors/plugins.d/plugins_d.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "plugins_d.h"
+#include "pluginsd_parser.h"
char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { NULL };
struct plugind *pluginsd_root = NULL;
-static inline int pluginsd_space(char c) {
+inline int pluginsd_space(char c) {
switch(c) {
case ' ':
case '\t':
@@ -19,8 +20,9 @@ static inline int pluginsd_space(char c) {
}
}
-inline int config_isspace(char c) {
- switch(c) {
+inline int config_isspace(char c)
+{
+ switch (c) {
case ' ':
case '\t':
case '\r':
@@ -34,15 +36,18 @@ inline int config_isspace(char c) {
}
// split a text into words, respecting quotes
-static inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char)) {
+static inline int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char), char *recover_input, char **recover_location, int max_recover)
+{
char *s = str, quote = 0;
- int i = 0, j;
+ int i = 0, j, rec = 0;
+ char *recover = recover_input;
// skip all white space
- while(unlikely(custom_isspace(*s))) s++;
+ while (unlikely(custom_isspace(*s)))
+ s++;
// check for quote
- if(unlikely(*s == '\'' || *s == '"')) {
+ if (unlikely(*s == '\'' || *s == '"')) {
quote = *s; // remember the quote
s++; // skip the quote
}
@@ -51,595 +56,95 @@ static inline int quoted_strings_splitter(char *str, char **words, int max_words
words[i++] = s;
// while we have something
- while(likely(*s)) {
+ while (likely(*s)) {
// if it is escape
- if(unlikely(*s == '\\' && s[1])) {
+ if (unlikely(*s == '\\' && s[1])) {
s += 2;
continue;
}
// if it is quote
- else if(unlikely(*s == quote)) {
+ else if (unlikely(*s == quote)) {
quote = 0;
+ if (recover && rec < max_recover) {
+ recover_location[rec++] = s;
+ *recover++ = *s;
+ }
*s = ' ';
continue;
}
// if it is a space
- else if(unlikely(quote == 0 && custom_isspace(*s))) {
-
+ else if (unlikely(quote == 0 && custom_isspace(*s))) {
// terminate the word
+ if (recover && rec < max_recover) {
+ if (!rec || (rec && recover_location[rec-1] != s)) {
+ recover_location[rec++] = s;
+ *recover++ = *s;
+ }
+ }
*s++ = '\0';
// skip all white space
- while(likely(custom_isspace(*s))) s++;
+ while (likely(custom_isspace(*s)))
+ s++;
// check for quote
- if(unlikely(*s == '\'' || *s == '"')) {
+ if (unlikely(*s == '\'' || *s == '"')) {
quote = *s; // remember the quote
s++; // skip the quote
}
// if we reached the end, stop
- if(unlikely(!*s)) break;
+ if (unlikely(!*s))
+ break;
// store the next word
- if(likely(i < max_words)) words[i++] = s;
- else break;
+ if (likely(i < max_words))
+ words[i++] = s;
+ else
+ break;
}
// anything else
- else s++;
+ else
+ s++;
}
// terminate the words
j = i;
- while(likely(j < max_words)) words[j++] = NULL;
+ while (likely(j < max_words))
+ words[j++] = NULL;
return i;
}
-inline int pluginsd_initialize_plugin_directories() {
+inline int pluginsd_initialize_plugin_directories()
+{
char plugins_dirs[(FILENAME_MAX * 2) + 1];
static char *plugins_dir_list = NULL;
// Get the configuration entry
- if(likely(!plugins_dir_list)) {
+ if (likely(!plugins_dir_list)) {
snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR);
- plugins_dir_list = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs));
+ plugins_dir_list = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs));
}
// Parse it and store it to plugin directories
- return quoted_strings_splitter(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace);
-}
-
-inline int pluginsd_split_words(char *str, char **words, int max_words) {
- return quoted_strings_splitter(str, words, max_words, pluginsd_space);
-}
-
-#ifdef ENABLE_HTTPS
-/**
- * Update Buffer
- *
- * Update the temporary buffer used to parse data received from slave
- *
- * @param output is a pointer to the vector where I will store the data
- * @param ssl is the connection pointer with the server
- *
- * @return it returns the total of bytes read on success and a negative number otherwise
- */
-int pluginsd_update_buffer(char *output, SSL *ssl) {
- ERR_clear_error();
- int bytesleft = SSL_read(ssl, output, PLUGINSD_LINE_MAX_SSL_READ);
- if(bytesleft <= 0) {
- int sslerrno = SSL_get_error(ssl, bytesleft);
- switch(sslerrno) {
- case SSL_ERROR_WANT_READ:
- case SSL_ERROR_WANT_WRITE:
- {
- break;
- }
- default:
- {
- u_long err;
- char buf[256];
- int counter = 0;
- while ((err = ERR_get_error()) != 0) {
- ERR_error_string_n(err, buf, sizeof(buf));
- info("%d SSL Handshake error (%s) on socket %d ", counter++, ERR_error_string((long)SSL_get_error(ssl, bytesleft), NULL), SSL_get_fd(ssl));
- }
- }
-
- }
- } else {
- output[bytesleft] = '\0';
- }
-
- return bytesleft;
+ return quoted_strings_splitter(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace, NULL, NULL, 0);
}
-/**
- * Get from Buffer
- *
- * Get data to process from buffer
- *
- * @param output is the output vector that will be used to parse the string.
- * @param bytesread the amount of bytes read in the previous iteration.
- * @param input the input vector where there are data to process
- * @param ssl a pointer to the connection with the server
- * @param src the first address of the input, because sometime will be necessary to restart the addr with it.
- *
- * @return It returns a pointer for the next iteration on success and NULL otherwise.
- */
-char * pluginsd_get_from_buffer(char *output, int *bytesread, char *input, SSL *ssl, char *src) {
- int copying = 1;
- char *endbuffer;
- size_t length;
- while(copying) {
- if(*bytesread > 0) {
- endbuffer = strchr(input, '\n');
- if(endbuffer) {
- copying = 0;
- endbuffer++; //Advance due the fact I wanna copy '\n'
- length = endbuffer - input;
- *bytesread -= length;
-
- memcpy(output, input, length);
- output += length;
- *output = '\0';
- input += length;
- }else {
- length = strlen(input);
- memcpy(output, input, length);
- output += length;
- input = src;
-
- *bytesread = pluginsd_update_buffer(input, ssl);
- if(*bytesread <= 0) {
- input = NULL;
- copying = 0;
- }
- }
- }else {
- //reduce sample of bytes read, print the length
- *bytesread = pluginsd_update_buffer(input, ssl);
- if(*bytesread <= 0) {
- input = NULL;
- copying = 0;
- }
- }
- }
-
- return input;
+inline int pluginsd_split_words(char *str, char **words, int max_words, char *recover_input, char **recover_location, int max_recover)
+{
+ return quoted_strings_splitter(str, words, max_words, pluginsd_space, recover_input, recover_location, max_recover);
}
-#endif
-
-inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations) {
- int enabled = cd->enabled;
-
- if(!fp || !enabled) {
- cd->enabled = 0;
- return 0;
- }
-
- size_t count = 0;
-
- char line[PLUGINSD_LINE_MAX + 1];
-
- char *words[PLUGINSD_MAX_WORDS] = { NULL };
- uint32_t BEGIN_HASH = simple_hash(PLUGINSD_KEYWORD_BEGIN);
- uint32_t END_HASH = simple_hash(PLUGINSD_KEYWORD_END);
- uint32_t FLUSH_HASH = simple_hash(PLUGINSD_KEYWORD_FLUSH);
- uint32_t CHART_HASH = simple_hash(PLUGINSD_KEYWORD_CHART);
- uint32_t DIMENSION_HASH = simple_hash(PLUGINSD_KEYWORD_DIMENSION);
- uint32_t DISABLE_HASH = simple_hash(PLUGINSD_KEYWORD_DISABLE);
- uint32_t VARIABLE_HASH = simple_hash(PLUGINSD_KEYWORD_VARIABLE);
-
- RRDSET *st = NULL;
- uint32_t hash;
-
- errno = 0;
- clearerr(fp);
-
- if(unlikely(fileno(fp) == -1)) {
- error("file descriptor given is not a valid stream");
- goto cleanup;
- }
-
-#ifdef ENABLE_HTTPS
- int bytesleft = 0;
- char tmpbuffer[PLUGINSD_LINE_MAX];
- char *readfrom = NULL;
-#endif
- char *r = NULL;
- while(!ferror(fp)) {
- if(unlikely(netdata_exit)) break;
-
-#ifdef ENABLE_HTTPS
- int normalread = 1;
- if(netdata_srv_ctx) {
- if(host->stream_ssl.conn && !host->stream_ssl.flags) {
- if(!bytesleft) {
- r = line;
- readfrom = tmpbuffer;
- bytesleft = pluginsd_update_buffer(readfrom, host->stream_ssl.conn);
- if(bytesleft <= 0) {
- break;
- }
- }
-
- readfrom = pluginsd_get_from_buffer(line, &bytesleft, readfrom, host->stream_ssl.conn, tmpbuffer);
- if(!readfrom) {
- r = NULL;
- }
-
- normalread = 0;
- }
- }
-
- if(normalread) {
- r = fgets(line, PLUGINSD_LINE_MAX, fp);
- }
-#else
- r = fgets(line, PLUGINSD_LINE_MAX, fp);
-#endif
- if(unlikely(!r)) {
- if(feof(fp))
- error("read failed: end of file");
- else if(ferror(fp))
- error("read failed: input error");
- else
- error("read failed: unknown error");
- break;
- }
-
- if(unlikely(netdata_exit)) break;
-
- line[PLUGINSD_LINE_MAX] = '\0';
-
- int w = pluginsd_split_words(line, words, PLUGINSD_MAX_WORDS);
- char *s = words[0];
- if(unlikely(!s || !*s || !w)) {
- continue;
- }
-
- // debug(D_PLUGINSD, "PLUGINSD: words 0='%s' 1='%s' 2='%s' 3='%s' 4='%s' 5='%s' 6='%s' 7='%s' 8='%s' 9='%s'", words[0], words[1], words[2], words[3], words[4], words[5], words[6], words[7], words[8], words[9]);
-
- if(likely(!simple_hash_strcmp(s, "SET", &hash))) {
- char *dimension = words[1];
- char *value = words[2];
-
- if(unlikely(!dimension || !*dimension)) {
- error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st->id, host->hostname);
- enabled = 0;
- break;
- }
-
- if(unlikely(!value || !*value)) value = NULL;
-
- if(unlikely(!st)) {
- error("requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension, value?value:"<nothing>", host->hostname);
- enabled = 0;
- break;
- }
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value?value:"<nothing>");
-
- if(value) {
- RRDDIM *rd = rrddim_find(st, dimension);
- if(unlikely(!rd)) {
- error("requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.", dimension, st->name, st->id, st->rrdhost->hostname);
- enabled = 0;
- break;
- }
- else
- rrddim_set_by_pointer(st, rd, strtoll(value, NULL, 0));
- }
- }
- else if(likely(hash == BEGIN_HASH && !strcmp(s, PLUGINSD_KEYWORD_BEGIN))) {
- char *id = words[1];
- char *microseconds_txt = words[2];
-
- if(unlikely(!id)) {
- error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname);
- enabled = 0;
- break;
- }
-
- st = rrdset_find(host, id);
- if(unlikely(!st)) {
- error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname);
- enabled = 0;
- break;
- }
-
- if(likely(st->counter_done)) {
- usec_t microseconds = 0;
- if(microseconds_txt && *microseconds_txt) microseconds = str2ull(microseconds_txt);
-
- if(likely(microseconds)) {
- if(trust_durations)
- rrdset_next_usec_unfiltered(st, microseconds);
- else
- rrdset_next_usec(st, microseconds);
- }
- else rrdset_next(st);
- }
- }
- else if(likely(hash == END_HASH && !strcmp(s, PLUGINSD_KEYWORD_END))) {
- if(unlikely(!st)) {
- error("requested an END, without a BEGIN on host '%s'. Disabling it.", host->hostname);
- enabled = 0;
- break;
- }
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_PLUGINSD, "requested an END on chart %s", st->id);
-
- rrdset_done(st);
- st = NULL;
- count++;
- }
- else if(likely(hash == CHART_HASH && !strcmp(s, PLUGINSD_KEYWORD_CHART))) {
- st = NULL;
-
- char *type = words[1];
- char *name = words[2];
- char *title = words[3];
- char *units = words[4];
- char *family = words[5];
- char *context = words[6];
- char *chart = words[7];
- char *priority_s = words[8];
- char *update_every_s = words[9];
- char *options = words[10];
- char *plugin = words[11];
- char *module = words[12];
-
- // parse the id from type
- char *id = NULL;
- if(likely(type && (id = strchr(type, '.')))) {
- *id = '\0';
- id++;
- }
-
- // make sure we have the required variables
- if(unlikely(!type || !*type || !id || !*id)) {
- error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname);
- enabled = 0;
- break;
- }
-
- // parse the name, and make sure it does not include 'type.'
- if(unlikely(name && *name)) {
- // when data are coming from slaves
- // name will be type.name
- // so we have to remove 'type.' from name too
- size_t len = strlen(type);
- if(strncmp(type, name, len) == 0 && name[len] == '.')
- name = &name[len + 1];
-
- // if the name is the same with the id,
- // or is just 'NULL', clear it.
- if(unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
- name = NULL;
- }
-
- int priority = 1000;
- if(likely(priority_s && *priority_s)) priority = str2i(priority_s);
-
- int update_every = cd->update_every;
- if(likely(update_every_s && *update_every_s)) update_every = str2i(update_every_s);
- if(unlikely(!update_every)) update_every = cd->update_every;
-
- RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
- if(unlikely(chart)) chart_type = rrdset_type_id(chart);
-
- if(unlikely(name && !*name)) name = NULL;
- if(unlikely(family && !*family)) family = NULL;
- if(unlikely(context && !*context)) context = NULL;
- if(unlikely(!title)) title = "";
- if(unlikely(!units)) units = "unknown";
-
- debug(D_PLUGINSD, "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d"
- , type, id
- , name?name:""
- , family?family:""
- , context?context:""
- , rrdset_type_name(chart_type)
- , priority
- , update_every
- );
-
- st = rrdset_create(
- host
- , type
- , id
- , name
- , family
- , context
- , title
- , units
- , (plugin && *plugin)?plugin:cd->filename
- , module
- , priority
- , update_every
- , chart_type
- );
-
- if(options && *options) {
- if(strstr(options, "obsolete"))
- rrdset_is_obsolete(st);
- else
- rrdset_isnot_obsolete(st);
-
- if(strstr(options, "detail"))
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
-
- if(strstr(options, "hidden"))
- rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
-
- if(strstr(options, "store_first"))
- rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
- else {
- rrdset_isnot_obsolete(st);
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
- }
- else if(likely(hash == DIMENSION_HASH && !strcmp(s, PLUGINSD_KEYWORD_DIMENSION))) {
- char *id = words[1];
- char *name = words[2];
- char *algorithm = words[3];
- char *multiplier_s = words[4];
- char *divisor_s = words[5];
- char *options = words[6];
-
- if(unlikely(!id || !*id)) {
- error("requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname, st?st->id:"UNSET");
- enabled = 0;
- break;
- }
- if(unlikely(!st)) {
- error("requested a DIMENSION, without a CHART, on host '%s'. Disabling it.", host->hostname);
- enabled = 0;
- break;
- }
-
- long multiplier = 1;
- if(multiplier_s && *multiplier_s) multiplier = strtol(multiplier_s, NULL, 0);
- if(unlikely(!multiplier)) multiplier = 1;
-
- long divisor = 1;
- if(likely(divisor_s && *divisor_s)) divisor = strtol(divisor_s, NULL, 0);
- if(unlikely(!divisor)) divisor = 1;
-
- if(unlikely(!algorithm || !*algorithm)) algorithm = "absolute";
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- debug(D_PLUGINSD, "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'"
- , st->id
- , id
- , name?name:""
- , rrd_algorithm_name(rrd_algorithm_id(algorithm))
- , multiplier
- , divisor
- , options?options:""
- );
-
- RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
- rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
- rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if(options && *options) {
- if(strstr(options, "obsolete") != NULL)
- rrddim_is_obsolete(st, rd);
- else
- rrddim_isnot_obsolete(st, rd);
- if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
- if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
- }
- else {
- rrddim_isnot_obsolete(st, rd);
- }
- }
- else if(likely(hash == VARIABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_VARIABLE))) {
- char *name = words[1];
- char *value = words[2];
- int global = (st)?0:1;
-
- if(name && *name) {
- if((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
- global = 1;
- name = words[2];
- value = words[3];
- }
- else if((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
- global = 0;
- name = words[2];
- value = words[3];
- }
- }
-
- if(unlikely(!name || !*name)) {
- error("requested a VARIABLE on host '%s', without a variable name. Disabling it.", host->hostname);
- enabled = 0;
- break;
- }
-
- if(unlikely(!value || !*value))
- value = NULL;
-
- if(value) {
- char *endptr = NULL;
- calculated_number v = (calculated_number)str2ld(value, &endptr);
-
- if(unlikely(endptr && *endptr)) {
- if(endptr == value)
- error("the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name, host->hostname);
- else
- error("the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname, endptr);
- }
-
- if(global) {
- RRDVAR *rv = rrdvar_custom_host_variable_create(host, name);
- if (rv) rrdvar_custom_host_variable_set(host, rv, v);
- else error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname);
- }
- else if(st) {
- RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name);
- if (rs) rrdsetvar_custom_chart_variable_set(rs, v);
- else error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id);
- }
- else
- error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname);
- }
- else
- error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global)?"HOST":"CHART", name, host->hostname);
- }
- else if(likely(hash == FLUSH_HASH && !strcmp(s, PLUGINSD_KEYWORD_FLUSH))) {
- debug(D_PLUGINSD, "requested a FLUSH");
- st = NULL;
- }
- else if(unlikely(hash == DISABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_DISABLE))) {
- info("called DISABLE. Disabling it.");
- enabled = 0;
- break;
- }
- else {
- error("sent command '%s' which is not known by netdata, for host '%s'. Disabling it.", s, host->hostname);
- enabled = 0;
- break;
- }
- }
-
-cleanup:
- cd->enabled = enabled;
-
- if(likely(count)) {
- cd->successful_collections += count;
- cd->serial_failures = 0;
- }
- else
- cd->serial_failures++;
-
- return count;
-}
-
-static void pluginsd_worker_thread_cleanup(void *arg) {
+static void pluginsd_worker_thread_cleanup(void *arg)
+{
struct plugind *cd = (struct plugind *)arg;
- if(cd->enabled && !cd->obsolete) {
+ if (cd->enabled && !cd->obsolete) {
cd->obsolete = 1;
info("data collection thread exiting");
@@ -649,7 +154,7 @@ static void pluginsd_worker_thread_cleanup(void *arg) {
info("killing child process pid %d", cd->pid);
if (killpid(cd->pid) != -1) {
info("waiting for child process pid %d to exit...", cd->pid);
- waitid(P_PID, (id_t) cd->pid, &info, WEXITED);
+ waitid(P_PID, (id_t)cd->pid, &info, WEXITED);
}
cd->pid = 0;
}
@@ -657,24 +162,25 @@ static void pluginsd_worker_thread_cleanup(void *arg) {
}
#define SERIAL_FAILURES_THRESHOLD 10
-static void pluginsd_worker_thread_handle_success(struct plugind *cd) {
+static void pluginsd_worker_thread_handle_success(struct plugind *cd)
+{
if (likely(cd->successful_collections)) {
- sleep((unsigned int) cd->update_every);
+ sleep((unsigned int)cd->update_every);
return;
}
- if(likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
- info("'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
+ if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
+ info(
+ "'%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
cd->fullfilename, cd->pid,
- cd->enabled ?
- "Waiting a bit before starting it again." :
- "Will not start it again - it is now disabled.");
- sleep((unsigned int) (cd->update_every * 10));
+ cd->enabled ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled.");
+ sleep((unsigned int)(cd->update_every * 10));
return;
}
if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
- error("'%s' (pid %d) does not generate useful output, although it reports success (exits with 0)."
+ error(
+ "'%s' (pid %d) does not generate useful output, although it reports success (exits with 0)."
"We have tried to collect something %zu times - unsuccessfully. Disabling it.",
cd->fullfilename, cd->pid, cd->serial_failures);
cd->enabled = 0;
@@ -684,7 +190,8 @@ static void pluginsd_worker_thread_handle_success(struct plugind *cd) {
return;
}
-static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) {
+static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code)
+{
if (worker_ret_code == -1) {
info("'%s' (pid %d) was killed with SIGTERM. Disabling it.", cd->fullfilename, cd->pid);
cd->enabled = 0;
@@ -692,24 +199,25 @@ static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_r
}
if (!cd->successful_collections) {
- error("'%s' (pid %d) exited with error code %d and haven't collected any data. Disabling it.",
- cd->fullfilename, cd->pid, worker_ret_code);
+ error(
+ "'%s' (pid %d) exited with error code %d and haven't collected any data. Disabling it.", cd->fullfilename,
+ cd->pid, worker_ret_code);
cd->enabled = 0;
return;
}
if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) {
- error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
+ error(
+ "'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections,
- cd->enabled ?
- "Waiting a bit before starting it again." :
- "Will not start it again - it is disabled.");
- sleep((unsigned int) (cd->update_every * 10));
+ cd->enabled ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled.");
+ sleep((unsigned int)(cd->update_every * 10));
return;
}
if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
- error("'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)."
+ error(
+ "'%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)."
"We tried to restart it %zu times, but it failed to generate data. Disabling it.",
cd->fullfilename, cd->pid, worker_ret_code, cd->successful_collections, cd->serial_failures);
cd->enabled = 0;
@@ -720,7 +228,8 @@ static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_r
}
#undef SERIAL_FAILURES_THRESHOLD
-void *pluginsd_worker_thread(void *arg) {
+void *pluginsd_worker_thread(void *arg)
+{
netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg);
struct plugind *cd = (struct plugind *)arg;
@@ -728,9 +237,9 @@ void *pluginsd_worker_thread(void *arg) {
cd->obsolete = 0;
size_t count = 0;
- while(!netdata_exit) {
+ while (!netdata_exit) {
FILE *fp = mypopen(cd->cmd, &cd->pid);
- if(unlikely(!fp)) {
+ if (unlikely(!fp)) {
error("Cannot popen(\"%s\", \"r\").", cd->cmd);
break;
}
@@ -748,14 +257,16 @@ void *pluginsd_worker_thread(void *arg) {
pluginsd_worker_thread_handle_error(cd, worker_ret_code);
cd->pid = 0;
- if(unlikely(!cd->enabled)) break;
+ if (unlikely(!cd->enabled))
+ break;
}
netdata_thread_cleanup_pop(1);
return NULL;
}
-static void pluginsd_main_cleanup(void *data) {
+static void pluginsd_main_cleanup(void *data)
+{
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
info("cleaning up...");
@@ -772,31 +283,34 @@ static void pluginsd_main_cleanup(void *data) {
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
-void *pluginsd_main(void *ptr) {
+void *pluginsd_main(void *ptr)
+{
netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr);
int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1);
- int scan_frequency = (int) config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
- if(scan_frequency < 1) scan_frequency = 1;
+ int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
+ if (scan_frequency < 1)
+ scan_frequency = 1;
// disable some plugins by default
config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO);
// store the errno for each plugins directory
// so that we don't log broken directories on each loop
- int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
+ int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
- while(!netdata_exit) {
+ while (!netdata_exit) {
int idx;
const char *directory_name;
- for( idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]) ; idx++ ) {
- if(unlikely(netdata_exit)) break;
+ for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) {
+ if (unlikely(netdata_exit))
+ break;
errno = 0;
DIR *dir = opendir(directory_name);
- if(unlikely(!dir)) {
- if(directory_errors[idx] != errno) {
+ if (unlikely(!dir)) {
+ if (directory_errors[idx] != errno) {
directory_errors[idx] = errno;
error("cannot open plugins directory '%s'", directory_name);
}
@@ -804,16 +318,19 @@ void *pluginsd_main(void *ptr) {
}
struct dirent *file = NULL;
- while(likely((file = readdir(dir)))) {
- if(unlikely(netdata_exit)) break;
+ while (likely((file = readdir(dir)))) {
+ if (unlikely(netdata_exit))
+ break;
debug(D_PLUGINSD, "examining file '%s'", file->d_name);
- if(unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0)) continue;
+ if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0))
+ continue;
- int len = (int) strlen(file->d_name);
- if(unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN)) continue;
- if(unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
+ int len = (int)strlen(file->d_name);
+ if (unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN))
+ continue;
+ if (unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX);
continue;
}
@@ -822,24 +339,25 @@ void *pluginsd_main(void *ptr) {
snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name);
int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run);
- if(unlikely(!enabled)) {
+ if (unlikely(!enabled)) {
debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name);
continue;
}
// check if it runs already
struct plugind *cd;
- for(cd = pluginsd_root ; cd ; cd = cd->next)
- if(unlikely(strcmp(cd->filename, file->d_name) == 0)) break;
+ for (cd = pluginsd_root; cd; cd = cd->next)
+ if (unlikely(strcmp(cd->filename, file->d_name) == 0))
+ break;
- if(likely(cd && !cd->obsolete)) {
+ if (likely(cd && !cd->obsolete)) {
debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename);
continue;
}
// it is not running
// allocate a new one, or use the obsolete one
- if(unlikely(!cd)) {
+ if (unlikely(!cd)) {
cd = callocz(sizeof(struct plugind), 1);
snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname);
@@ -848,24 +366,28 @@ void *pluginsd_main(void *ptr) {
snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename);
cd->enabled = enabled;
- cd->update_every = (int) config_get_number(cd->id, "update every", localhost->rrd_update_every);
+ cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every);
cd->started_t = now_realtime_sec();
char *def = "";
- snprintfz(cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every, config_get(cd->id, "command options", def));
+ snprintfz(
+ cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every,
+ config_get(cd->id, "command options", def));
// link it
- if(likely(pluginsd_root)) cd->next = pluginsd_root;
+ if (likely(pluginsd_root))
+ cd->next = pluginsd_root;
pluginsd_root = cd;
// it is not currently running
cd->obsolete = 1;
- if(cd->enabled) {
+ if (cd->enabled) {
char tag[NETDATA_THREAD_TAG_MAX + 1];
snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PLUGINSD[%s]", pluginname);
// spawn a new thread for it
- netdata_thread_create(&cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd);
+ netdata_thread_create(
+ &cd->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, pluginsd_worker_thread, cd);
}
}
}
@@ -873,7 +395,7 @@ void *pluginsd_main(void *ptr) {
closedir(dir);
}
- sleep((unsigned int) scan_frequency);
+ sleep((unsigned int)scan_frequency);
}
netdata_thread_cleanup_pop(1);
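The reworked `quoted_strings_splitter()` above tokenizes each line in place, and the new `recover_input`/`recover_location` parameters let it record every byte it overwrites (separators turned into `'\0'`, closing quotes into spaces) so a caller can undo the tokenization afterwards, e.g. to re-stream or log the original line. A standalone, simplified sketch of that mechanism (hypothetical names, quote handling omitted):

```c
/* Simplified sketch of the recover mechanism added above (hypothetical,
 * quote handling omitted): split in place, remember each overwritten byte
 * and its address, then put the bytes back to restore the original line. */
#include <stdio.h>

static int split_words(char *s, char **words, int max_words,
                       char *recover_input, char **recover_location,
                       int max_recover, int *recovered) {
    int i = 0, rec = 0;

    while (*s) {
        while (*s == ' ' || *s == '\t') {      /* separator: terminate word */
            if (rec < max_recover) {           /* remember what we destroy */
                recover_location[rec] = s;
                recover_input[rec++] = *s;
            }
            *s++ = '\0';
        }
        if (!*s)
            break;
        if (i < max_words)
            words[i++] = s;                    /* start of the next word */
        else
            break;
        while (*s && *s != ' ' && *s != '\t')  /* skip over the word body */
            s++;
    }

    *recovered = rec;
    return i;
}

int main(void) {
    char line[] = "BEGIN example.random 1000000";
    char *words[10], *where[10];
    char saved[10];
    int rec = 0;

    int n = split_words(line, words, 10, saved, where, 10, &rec);
    for (int w = 0; w < n; w++)
        printf("word %d: '%s'\n", w, words[w]);

    for (int r = 0; r < rec; r++)              /* undo the tokenization */
        *where[r] = saved[r];
    printf("restored: '%s'\n", line);
    return 0;
}
```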
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
index 7d5c7dda4..fd99b3584 100644
--- a/collectors/plugins.d/plugins_d.h
+++ b/collectors/plugins.d/plugins_d.h
@@ -29,6 +29,13 @@
#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE"
+#define PLUGINSD_KEYWORD_LABEL "LABEL"
+#define PLUGINSD_KEYWORD_OVERWRITE "OVERWRITE"
+#define PLUGINSD_KEYWORD_GUID "GUID"
+#define PLUGINSD_KEYWORD_CONTEXT "CONTEXT"
+#define PLUGINSD_KEYWORD_TOMBSTONE "TOMBSTONE"
+#define PLUGINSD_KEYWORD_HOST "HOST"
+
#define PLUGINSD_LINE_MAX 1024
#define PLUGINSD_LINE_MAX_SSL_READ 512
@@ -58,7 +65,7 @@ struct plugind {
volatile sig_atomic_t enabled; // if this is enabled or not
time_t started_t;
-
+ uint32_t version;
struct plugind *next;
};
@@ -67,10 +74,11 @@ extern struct plugind *pluginsd_root;
extern void *pluginsd_main(void *ptr);
extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
-extern int pluginsd_split_words(char *str, char **words, int max_words);
+extern int pluginsd_split_words(char *str, char **words, int max_words, char *recover_string, char **recover_location, int max_recover);
extern int pluginsd_initialize_plugin_directories();
extern int config_isspace(char c);
+extern int pluginsd_space(char c);
#endif /* NETDATA_PLUGINS_D_H */
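Among the new keywords declared above, GUID, CONTEXT and TOMBSTONE all carry a uuid argument, and the handlers added below validate it with a length check plus libuuid's `uuid_parse()`. A minimal sketch of that validation; `GUID_LEN` is not defined anywhere in this diff, so its value here (36, the canonical textual form) is an assumption:

```c
/* Validation sketch matching pluginsd_guid()/pluginsd_context() below.
 * Assumptions: libuuid is available (link with -luuid) and GUID_LEN is the
 * 36-character canonical text form -- neither is shown in this diff. */
#include <stdio.h>
#include <string.h>
#include <uuid/uuid.h>

#define GUID_LEN 36  /* assumed: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" */

static int valid_guid(const char *str, uuid_t uuid) {
    /* the same two checks as the handlers: exact length, then parseable */
    return str && strlen(str) == GUID_LEN && uuid_parse(str, uuid) != -1;
}

int main(void) {
    uuid_t uuid;
    printf("%d\n", valid_guid("123e4567-e89b-12d3-a456-426614174000", uuid)); /* 1 */
    printf("%d\n", valid_guid("not-a-uuid", uuid));                           /* 0 */
    return 0;
}
```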
diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c
new file mode 100644
index 000000000..4a97c5535
--- /dev/null
+++ b/collectors/plugins.d/pluginsd_parser.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pluginsd_parser.h"
+
+/*
+ * This is the action defined for the SET command
+ */
+PARSER_RC pluginsd_set_action(void *user, RRDSET *st, RRDDIM *rd, long long int value)
+{
+ UNUSED(user);
+
+ rrddim_set_by_pointer(st, rd, value);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_flush_action(void *user, RRDSET *st)
+{
+ UNUSED(user);
+ UNUSED(st);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_begin_action(void *user, RRDSET *st, usec_t microseconds, int trust_durations)
+{
+ UNUSED(user);
+ if (likely(st->counter_done)) {
+ if (likely(microseconds)) {
+ if (trust_durations)
+ rrdset_next_usec_unfiltered(st, microseconds);
+ else
+ rrdset_next_usec(st, microseconds);
+ } else
+ rrdset_next(st);
+ }
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_end_action(void *user, RRDSET *st)
+{
+ UNUSED(user);
+
+ rrdset_done(st);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family, char *context, char *title, char *units, char *plugin,
+ char *module, int priority, int update_every, RRDSET_TYPE chart_type, char *options)
+{
+ RRDSET *st = NULL;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ st = rrdset_create(
+ host, type, id, name, family, context, title, units,
+ plugin, module, priority, update_every,
+ chart_type);
+
+ if (options && *options) {
+ if (strstr(options, "obsolete"))
+ rrdset_is_obsolete(st);
+ else
+ rrdset_isnot_obsolete(st);
+
+ if (strstr(options, "detail"))
+ rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+
+ if (strstr(options, "hidden"))
+ rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
+
+ if (strstr(options, "store_first"))
+ rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
+ else
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ } else {
+ rrdset_isnot_obsolete(st);
+ rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
+ rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
+ }
+ ((PARSER_USER_OBJECT *)user)->st = st;
+
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_disable_action(void *user)
+{
+ UNUSED(user);
+
+ info("called DISABLE. Disabling it.");
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+
+PARSER_RC pluginsd_variable_action(void *user, RRDHOST *host, RRDSET *st, char *name, int global, calculated_number value)
+{
+ UNUSED(user);
+
+ if (global) {
+ RRDVAR *rv = rrdvar_custom_host_variable_create(host, name);
+ if (rv)
+ rrdvar_custom_host_variable_set(host, rv, value);
+ else
+ error("cannot find/create HOST VARIABLE '%s' on host '%s'", name, host->hostname);
+ } else {
+ RRDSETVAR *rs = rrdsetvar_custom_chart_variable_create(st, name);
+ if (rs)
+ rrdsetvar_custom_chart_variable_set(rs, value);
+ else
+ error("cannot find/create CHART VARIABLE '%s' on host '%s', chart '%s'", name, host->hostname, st->id);
+ }
+ return PARSER_RC_OK;
+}
+
+
+
+PARSER_RC pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm, long multiplier, long divisor, char *options,
+ RRD_ALGORITHM algorithm_type)
+{
+ UNUSED(user);
+ UNUSED(algorithm);
+
+ RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, algorithm_type);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
+ rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if (options && *options) {
+ if (strstr(options, "obsolete") != NULL)
+ rrddim_is_obsolete(st, rd);
+ else
+ rrddim_isnot_obsolete(st, rd);
+ if (strstr(options, "hidden") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
+ if (strstr(options, "noreset") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ if (strstr(options, "nooverflow") != NULL)
+ rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
+ } else {
+ rrddim_isnot_obsolete(st, rd);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE source)
+{
+
+ ((PARSER_USER_OBJECT *) user)->new_labels = add_label_to_list(((PARSER_USER_OBJECT *) user)->new_labels, key, value, source);
+
+ return PARSER_RC_OK;
+}
+
+
+PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels)
+{
+ UNUSED(user);
+
+ if (!host->labels.head) {
+ host->labels.head = new_labels;
+ } else {
+ rrdhost_rdlock(host);
+ replace_label_list(&host->labels, new_labels);
+ rrdhost_unlock(host);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_set(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *dimension = words[1];
+ char *value = words[2];
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ if (unlikely(!dimension || !*dimension)) {
+ error("requested a SET on chart '%s' of host '%s', without a dimension. Disabling it.", st->id, host->hostname);
+ goto disable;
+ }
+
+ if (unlikely(!value || !*value))
+ value = NULL;
+
+ if (unlikely(!st)) {
+ error(
+ "requested a SET on dimension %s with value %s on host '%s', without a BEGIN. Disabling it.", dimension,
+ value ? value : "<nothing>", host->hostname);
+ goto disable;
+ }
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "is setting dimension %s/%s to %s", st->id, dimension, value ? value : "<nothing>");
+
+ if (value) {
+ RRDDIM *rd = rrddim_find(st, dimension);
+ if (unlikely(!rd)) {
+ error(
+ "requested a SET to dimension with id '%s' on stats '%s' (%s) on host '%s', which does not exist. Disabling it.",
+ dimension, st->name, st->id, st->rrdhost->hostname);
+ goto disable;
+ } else {
+ if (plugins_action->set_action) {
+ return plugins_action->set_action(
+ user, st, rd, strtoll(value, NULL, 0));
+ }
+ }
+ }
+ return PARSER_RC_OK;
+
+disable:
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_begin(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *id = words[1];
+ char *microseconds_txt = words[2];
+
+ RRDSET *st = NULL;
+ RRDHOST *host = ((PARSER_USER_OBJECT *)user)->host;
+
+ if (unlikely(!id)) {
+ error("requested a BEGIN without a chart id for host '%s'. Disabling it.", host->hostname);
+ goto disable;
+ }
+
+ st = rrdset_find(host, id);
+ if (unlikely(!st)) {
+ error("requested a BEGIN on chart '%s', which does not exist on host '%s'. Disabling it.", id, host->hostname);
+ goto disable;
+ }
+ ((PARSER_USER_OBJECT *)user)->st = st;
+
+ usec_t microseconds = 0;
+ if (microseconds_txt && *microseconds_txt)
+ microseconds = str2ull(microseconds_txt);
+
+ if (plugins_action->begin_action) {
+ return plugins_action->begin_action(user, st, microseconds,
+ ((PARSER_USER_OBJECT *)user)->trust_durations);
+ }
+ return PARSER_RC_OK;
+disable:
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_end(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ if (unlikely(!st)) {
+ error("requested an END, without a BEGIN on host '%s'. Disabling it.", host->hostname);
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(D_PLUGINSD, "requested an END on chart %s", st->id);
+
+ ((PARSER_USER_OBJECT *) user)->st = NULL;
+ ((PARSER_USER_OBJECT *) user)->count++;
+ if (plugins_action->end_action) {
+ return plugins_action->end_action(user, st);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_chart(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+ if (unlikely(!host && !((PARSER_USER_OBJECT *) user)->host_exists)) {
+ debug(D_PLUGINSD, "Ignoring chart belonging to missing or ignored host.");
+ return PARSER_RC_OK;
+ }
+
+ char *type = words[1];
+ char *name = words[2];
+ char *title = words[3];
+ char *units = words[4];
+ char *family = words[5];
+ char *context = words[6];
+ char *chart = words[7];
+ char *priority_s = words[8];
+ char *update_every_s = words[9];
+ char *options = words[10];
+ char *plugin = words[11];
+ char *module = words[12];
+
+ int have_action = ((plugins_action->chart_action) != NULL);
+
+ // parse the id from type
+ char *id = NULL;
+ if (likely(type && (id = strchr(type, '.')))) {
+ *id = '\0';
+ id++;
+ }
+
+ // make sure we have the required variables
+ if (unlikely((!type || !*type || !id || !*id))) {
+ if (likely(host))
+ error("requested a CHART, without a type.id, on host '%s'. Disabling it.", host->hostname);
+ else
+ error("requested a CHART, without a type.id. Disabling it.");
+ ((PARSER_USER_OBJECT *) user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ // parse the name, and make sure it does not include 'type.'
+ if (unlikely(name && *name)) {
+ // when data are streamed from child nodes
+ // name will be type.name
+ // so we have to remove 'type.' from name too
+ size_t len = strlen(type);
+ if (strncmp(type, name, len) == 0 && name[len] == '.')
+ name = &name[len + 1];
+
+ // if the name is the same with the id,
+ // or is just 'NULL', clear it.
+ if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
+ name = NULL;
+ }
+
+ int priority = 1000;
+ if (likely(priority_s && *priority_s))
+ priority = str2i(priority_s);
+
+ int update_every = ((PARSER_USER_OBJECT *) user)->cd->update_every;
+ if (likely(update_every_s && *update_every_s))
+ update_every = str2i(update_every_s);
+ if (unlikely(!update_every))
+ update_every = ((PARSER_USER_OBJECT *) user)->cd->update_every;
+
+ RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
+ if (unlikely(chart))
+ chart_type = rrdset_type_id(chart);
+
+ if (unlikely(name && !*name))
+ name = NULL;
+ if (unlikely(family && !*family))
+ family = NULL;
+ if (unlikely(context && !*context))
+ context = NULL;
+ if (unlikely(!title))
+ title = "";
+ if (unlikely(!units))
+ units = "unknown";
+
+ debug(
+ D_PLUGINSD,
+ "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d",
+ type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type),
+ priority, update_every);
+
+ if (have_action) {
+ return plugins_action->chart_action(
+ user, type, id, name, family, context, title, units,
+ (plugin && *plugin) ? plugin : ((PARSER_USER_OBJECT *)user)->cd->filename, module, priority, update_every,
+ chart_type, options);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_dimension(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *id = words[1];
+ char *name = words[2];
+ char *algorithm = words[3];
+ char *multiplier_s = words[4];
+ char *divisor_s = words[5];
+ char *options = words[6];
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+ if (unlikely(!host && !((PARSER_USER_OBJECT *) user)->host_exists)) {
+ debug(D_PLUGINSD, "Ignoring dimension belonging to missing or ignored host.");
+ return PARSER_RC_OK;
+ }
+
+ if (unlikely(!id)) {
+ error(
+ "requested a DIMENSION, without an id, host '%s' and chart '%s'. Disabling it.", host->hostname,
+ st ? st->id : "UNSET");
+ goto disable;
+ }
+
+ if (unlikely(!st && !((PARSER_USER_OBJECT *) user)->st_exists)) {
+ error("requested a DIMENSION, without a CHART, on host '%s'. Disabling it.", host->hostname);
+ goto disable;
+ }
+
+ long multiplier = 1;
+ if (multiplier_s && *multiplier_s) {
+ multiplier = strtol(multiplier_s, NULL, 0);
+ if (unlikely(!multiplier))
+ multiplier = 1;
+ }
+
+ long divisor = 1;
+ if (likely(divisor_s && *divisor_s)) {
+ divisor = strtol(divisor_s, NULL, 0);
+ if (unlikely(!divisor))
+ divisor = 1;
+ }
+
+ if (unlikely(!algorithm || !*algorithm))
+ algorithm = "absolute";
+
+ if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
+ debug(
+ D_PLUGINSD,
+ "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'",
+ st->id, id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor,
+ options ? options : "");
+
+ if (plugins_action->dimension_action) {
+ return plugins_action->dimension_action(
+ user, st, id, name, algorithm,
+ multiplier, divisor, (options && *options)?options:NULL, rrd_algorithm_id(algorithm));
+ }
+
+ return PARSER_RC_OK;
+disable:
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_variable(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *name = words[1];
+ char *value = words[2];
+ calculated_number v;
+
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+
+ int global = (st) ? 0 : 1;
+
+ if (name && *name) {
+ if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
+ global = 1;
+ name = words[2];
+ value = words[3];
+ } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
+ global = 0;
+ name = words[2];
+ value = words[3];
+ }
+ }
+
+ if (unlikely(!name || !*name)) {
+ error("requested a VARIABLE on host '%s', without a variable name. Disabling it.", host->hostname);
+ ((PARSER_USER_OBJECT *)user)->enabled = 0;
+ return PARSER_RC_ERROR;
+ }
+
+ if (unlikely(!value || !*value))
+ value = NULL;
+
+ if (unlikely(!value)) {
+ error("cannot set %s VARIABLE '%s' on host '%s' to an empty value", (global) ? "HOST" : "CHART", name,
+ host->hostname);
+ return PARSER_RC_OK;
+ }
+
+ if (!global && !st) {
+ error("cannot find/create CHART VARIABLE '%s' on host '%s' without a chart", name, host->hostname);
+ return PARSER_RC_OK;
+ }
+
+ char *endptr = NULL;
+ v = (calculated_number)str2ld(value, &endptr);
+ if (unlikely(endptr && *endptr)) {
+ if (endptr == value)
+ error(
+ "the value '%s' of VARIABLE '%s' on host '%s' cannot be parsed as a number", value, name,
+ host->hostname);
+ else
+ error(
+ "the value '%s' of VARIABLE '%s' on host '%s' has leftovers: '%s'", value, name, host->hostname,
+ endptr);
+ }
+
+ if (plugins_action->variable_action) {
+ return plugins_action->variable_action(user, host, st, name, global, v);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_flush(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+ debug(D_PLUGINSD, "requested a FLUSH");
+ RRDSET *st = ((PARSER_USER_OBJECT *) user)->st;
+ ((PARSER_USER_OBJECT *) user)->st = NULL;
+ if (plugins_action->flush_action) {
+ return plugins_action->flush_action(user, st);
+ }
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_disable(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(user);
+ UNUSED(words);
+
+ if (plugins_action->disable_action) {
+ return plugins_action->disable_action(user);
+ }
+ return PARSER_RC_ERROR;
+}
+
+PARSER_RC pluginsd_label(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *store;
+
+ if (!words[1] || !words[2] || !words[3]) {
+ error("Ignoring malformed or empty LABEL command.");
+ return PARSER_RC_OK;
+ }
+ if (!words[4])
+ store = words[3];
+ else {
+ store = callocz(PLUGINSD_LINE_MAX + 1, sizeof(char));
+ size_t remaining = PLUGINSD_LINE_MAX;
+ char *move = store;
+ int i = 3;
+ while (i < PLUGINSD_MAX_WORDS) {
+ size_t length = strlen(words[i]);
+ if ((length + 1) >= remaining)
+ break;
+
+ remaining -= (length + 1);
+ memcpy(move, words[i], length);
+ move += length;
+ *move++ = ' ';
+
+ i++;
+ if (!words[i])
+ break;
+ }
+ }
+
+ if (plugins_action->label_action) {
+ PARSER_RC rc = plugins_action->label_action(user, words[1], store, strtol(words[2], NULL, 10));
+ if (store != words[3])
+ freez(store);
+ return rc;
+ }
+
+ if (store != words[3])
+ freez(store);
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_overwrite(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ UNUSED(words);
+
+ RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host;
+ debug(D_PLUGINSD, "requested a OVERWITE a variable");
+
+ struct label *new_labels = ((PARSER_USER_OBJECT *)user)->new_labels;
+ ((PARSER_USER_OBJECT *)user)->new_labels = NULL;
+
+ if (plugins_action->overwrite_action) {
+ return plugins_action->overwrite_action(user, host, new_labels);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_guid(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a GUID, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a GUID, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->guid_action) {
+ return plugins_action->guid_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_context(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a CONTEXT, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a CONTEXT, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->context_action) {
+ return plugins_action->context_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC pluginsd_tombstone(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *uuid_str = words[1];
+ uuid_t uuid;
+
+ if (unlikely(!uuid_str)) {
+ error("requested a TOMBSTONE, without a uuid.");
+ return PARSER_RC_ERROR;
+ }
+ if (unlikely(strlen(uuid_str) != GUID_LEN || uuid_parse(uuid_str, uuid) == -1)) {
+ error("requested a TOMBSTONE, without a valid uuid string.");
+ return PARSER_RC_ERROR;
+ }
+
+ debug(D_PLUGINSD, "Parsed uuid=%s", uuid_str);
+ if (plugins_action->tombstone_action) {
+ return plugins_action->tombstone_action(user, &uuid);
+ }
+
+ return PARSER_RC_OK;
+}
+
+PARSER_RC metalog_pluginsd_host(char **words, void *user, PLUGINSD_ACTION *plugins_action)
+{
+ char *machine_guid = words[1];
+ char *hostname = words[2];
+ char *registry_hostname = words[3];
+ char *update_every_s = words[4];
+ char *os = words[5];
+ char *timezone = words[6];
+ char *tags = words[7];
+
+ int update_every = 1;
+ if (likely(update_every_s && *update_every_s))
+ update_every = str2i(update_every_s);
+ if (unlikely(!update_every))
+ update_every = 1;
+
+ debug(D_PLUGINSD, "HOST PARSED: guid=%s, hostname=%s, reg_host=%s, update=%d, os=%s, timezone=%s, tags=%s",
+ machine_guid, hostname, registry_hostname, update_every, os, timezone, tags);
+
+ if (plugins_action->host_action) {
+ return plugins_action->host_action(
+ user, machine_guid, hostname, registry_hostname, update_every, os, timezone, tags);
+ }
+
+ return PARSER_RC_OK;
+}
+
+// New plugins.d parser
+
+inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations)
+{
+ int enabled = cd->enabled;
+
+ if (!fp || !enabled) {
+ cd->enabled = 0;
+ return 0;
+ }
+
+ if (unlikely(fileno(fp) == -1)) {
+ error("file descriptor given is not a valid stream");
+ cd->serial_failures++;
+ return 0;
+ }
+ clearerr(fp);
+
+ PARSER_USER_OBJECT *user = callocz(1, sizeof(*user));
+ ((PARSER_USER_OBJECT *) user)->enabled = cd->enabled;
+ ((PARSER_USER_OBJECT *) user)->host = host;
+ ((PARSER_USER_OBJECT *) user)->cd = cd;
+ ((PARSER_USER_OBJECT *) user)->trust_durations = trust_durations;
+
+ PARSER *parser = parser_init(host, user, fp, PARSER_INPUT_SPLIT);
+
+ if (unlikely(!parser)) {
+ error("Failed to initialize parser");
+ cd->serial_failures++;
+ return 0;
+ }
+
+ parser->plugins_action->begin_action = &pluginsd_begin_action;
+ parser->plugins_action->flush_action = &pluginsd_flush_action;
+ parser->plugins_action->end_action = &pluginsd_end_action;
+ parser->plugins_action->disable_action = &pluginsd_disable_action;
+ parser->plugins_action->variable_action = &pluginsd_variable_action;
+ parser->plugins_action->dimension_action = &pluginsd_dimension_action;
+ parser->plugins_action->label_action = &pluginsd_label_action;
+ parser->plugins_action->overwrite_action = &pluginsd_overwrite_action;
+ parser->plugins_action->chart_action = &pluginsd_chart_action;
+ parser->plugins_action->set_action = &pluginsd_set_action;
+
+ user->parser = parser;
+
+ while (likely(!parser_next(parser))) {
+ if (unlikely(netdata_exit || parser_action(parser, NULL)))
+ break;
+ }
+ info("PARSER ended");
+
+ parser_destroy(parser);
+
+ cd->enabled = ((PARSER_USER_OBJECT *) user)->enabled;
+ size_t count = ((PARSER_USER_OBJECT *) user)->count;
+
+ freez(user);
+
+ if (likely(count)) {
+ cd->successful_collections += count;
+ cd->serial_failures = 0;
+ } else
+ cd->serial_failures++;
+
+ return count;
+}
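Because `pluginsd_process()` above wires its callbacks through the `plugins_action` table rather than calling the rrd functions directly, a caller can swap in its own actions. Below is a hedged sketch of a dry-run consumer that overrides only `set_action` to print incoming values; `parser_init()`, `parser_next()`, `parser_action()` and `parser_destroy()` are used exactly as above, but `parser/parser.h` itself is not part of this diff, so treat this as a sketch rather than a drop-in:

```c
/* Hypothetical dry-run consumer (not part of this patch): reuses the parser
 * plumbing from pluginsd_process() above, but intercepts SET to print the
 * incoming values instead of storing them. */
#include <stdio.h>
#include "pluginsd_parser.h"

static PARSER_RC print_set_action(void *user, RRDSET *st, RRDDIM *rd, long long int value)
{
    UNUSED(user);
    fprintf(stderr, "SET %s.%s = %lld\n", st->id, rd->id, value);
    return PARSER_RC_OK;
}

size_t pluginsd_process_dryrun(RRDHOST *host, struct plugind *cd, FILE *fp)
{
    PARSER_USER_OBJECT *user = callocz(1, sizeof(*user));
    user->enabled = 1;
    user->host = host;
    user->cd = cd;

    PARSER *parser = parser_init(host, user, fp, PARSER_INPUT_SPLIT);
    parser->plugins_action->set_action = &print_set_action;  /* only SET is intercepted */
    user->parser = parser;

    while (likely(!parser_next(parser))) {
        if (unlikely(netdata_exit || parser_action(parser, NULL)))
            break;
    }

    size_t count = user->count;
    parser_destroy(parser);
    freez(user);
    return count;
}
```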
diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h
new file mode 100644
index 000000000..61e9c9bab
--- /dev/null
+++ b/collectors/plugins.d/pluginsd_parser.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINSD_PARSER_H
+#define NETDATA_PLUGINSD_PARSER_H
+
+#include "../../parser/parser.h"
+
+
+typedef struct parser_user_object {
+ PARSER *parser;
+ RRDSET *st;
+ RRDHOST *host;
+ void *opaque;
+ struct plugind *cd;
+ int trust_durations;
+ struct label *new_labels;
+ size_t count;
+ int enabled;
+ uint8_t st_exists;
+ uint8_t host_exists;
+ void *private; // the user can set this for private use
+} PARSER_USER_OBJECT;
+
+extern PARSER_RC pluginsd_set_action(void *user, RRDSET *st, RRDDIM *rd, long long int value);
+extern PARSER_RC pluginsd_flush_action(void *user, RRDSET *st);
+extern PARSER_RC pluginsd_begin_action(void *user, RRDSET *st, usec_t microseconds, int trust_durations);
+extern PARSER_RC pluginsd_end_action(void *user, RRDSET *st);
+extern PARSER_RC pluginsd_chart_action(void *user, char *type, char *id, char *name, char *family, char *context,
+ char *title, char *units, char *plugin, char *module, int priority,
+ int update_every, RRDSET_TYPE chart_type, char *options);
+extern PARSER_RC pluginsd_disable_action(void *user);
+extern PARSER_RC pluginsd_variable_action(void *user, RRDHOST *host, RRDSET *st, char *name, int global,
+ calculated_number value);
+extern PARSER_RC pluginsd_dimension_action(void *user, RRDSET *st, char *id, char *name, char *algorithm,
+ long multiplier, long divisor, char *options, RRD_ALGORITHM algorithm_type);
+extern PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE source);
+extern PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels);
+
+
+#endif //NETDATA_PLUGINSD_PARSER_H
diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in
deleted file mode 100644
index 47bb61f04..000000000
--- a/collectors/proc.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/proc.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
index 7e2aa1096..085afb4fb 100644
--- a/collectors/proc.plugin/README.md
+++ b/collectors/proc.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "proc.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/README.md
+-->
+
# proc.plugin
- `/proc/net/dev` (all network interfaces for all their values)
@@ -6,6 +11,7 @@
- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
- `/proc/net/snmp6` (total IPv6 usage)
- `/proc/net/netstat` (more IPv4 usage)
+- `/proc/net/wireless` (wireless extension)
- `/proc/net/stat/nf_conntrack` (connection tracking performance)
- `/proc/net/stat/synproxy` (synproxy performance)
- `/proc/net/ip_vs/stats` (IPVS connection statistics)
@@ -18,8 +24,10 @@
- `/proc/interrupts` (total and per core hardware interrupts)
- `/proc/softirqs` (total and per core software interrupts)
- `/proc/loadavg` (system load and total processes running)
+- `/proc/pressure/{cpu,memory,io}` (pressure stall information)
- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
- `/sys/class/power_supply` (power supply properties)
+- `/sys/class/infiniband` (infiniband interconnect)
- `ipc` (IPC semaphores and message queues)
- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
- `netdata` (internal Netdata resources utilization)
@@ -78,8 +86,8 @@ By default, Netdata will enable monitoring metrics only when they are not zero.
Netdata categorizes all block devices in 3 categories:
-1. physical disks (i.e. block devices that does not have slaves and are not partitions)
-2. virtual disks (i.e. block devices that have slaves - like RAID devices)
+1. physical disks (i.e. block devices that do not have child devices and are not partitions)
+2. virtual disks (i.e. block devices that have child devices - like RAID devices)
3. disk partitions (i.e. block devices that are part of a physical disk)
Performance metrics are enabled by default for all disk devices, except partitions and not-mounted virtual disks. Of course, you can enable/disable monitoring any block device by editing the Netdata configuration file.
@@ -226,13 +234,31 @@ So, to disable performance metrics for all loop devices you could add `performan
## Monitoring CPUs
-The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal throttling, frequency, and idle states. It gathers this information from multiple files.
+The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal
+throttling, frequency, and idle states. It gathers this information from multiple files.
-If more than 50 cores are present in a system then CPU thermal throttling, frequency, and idle state charts are disabled.
+If your system has more than 50 processors (`physical processors * cores per processor * threads per core`), the Agent
+automatically disables CPU thermal throttling, frequency, and idle state charts. To override this default, see the next
+section on configuration.
-#### configuration
+### Configuration
+
+The settings for monitoring CPUs are in the `[plugin:proc:/proc/stat]` section of your `netdata.conf` file.
-`keep per core files open` option in the `[plugin:proc:/proc/stat]` configuration section allows reducing the number of file operations on multiple files.
+The `keep per core files open` option lets you reduce the number of file operations on multiple files.
+
+If your system has more than 50 processors and you would like to see the CPU thermal throttling, frequency, and idle
+state charts that are automatically disabled, you can set the following boolean options in the
+`[plugin:proc:/proc/stat]` section.
+
+```conf
+ keep per core files open = yes
+ keep cpuidle files open = yes
+ core_throttle_count = yes
+ package_throttle_count = yes
+ cpu frequency = yes
+ cpu idle states = yes
+```
### CPU frequency
@@ -295,11 +321,50 @@ each state.
By default Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though).
+### Monitoring wireless network interfaces
+
+The settings for monitoring wireless interfaces are in the `[plugin:proc:/proc/net/wireless]` section of your `netdata.conf` file.
+
+```conf
+ status for all interfaces = yes
+ quality for all interfaces = yes
+ discarded packets for all interfaces = yes
+ missed beacon for all interface = yes
+```
+
+You can set the following values for each configuration option (see the example after this list):
+
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
+
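+For example, to keep monitoring interface status but silence the discarded packets charts, you could set (a minimal sketch, using the options shown above):
+
+```conf
+[plugin:proc:/proc/net/wireless]
+ status for all interfaces = yes
+ discarded packets for all interfaces = no
+```
+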
+#### Monitored wireless interface metrics
+
+- **Status**
+ The current state of the interface; its exact meaning is device-dependent.
+
+- **Link**
+ Overall quality of the link.
+
+- **Level**
+ Received signal strength indicator (RSSI), i.e. how strong the received signal is.
+
+- **Noise**
+ Background noise level.
+
+- **Discarded packets**
+ The number of packets discarded because they were received with a different NWID or ESSID (`nwid`), could not be decrypted (`crypt`), could not be properly re-assembled from link layer fragments by the hardware (`frag`), failed to be delivered (`retry`), or were lost in relation to specific wireless operations (`misc`).
+
+- **Missed beacon**
+ Number of periodic beacons from the cell or the access point the interface has missed.
+
#### alarms
There are several alarms defined in `health.d/net.conf`.
-The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alarms can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a slave or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alarm with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](../../health/#alarm-line-families) line in the alarm configuration. For example, if you want to disable the `inbound packets dropped` alarm for `eth0`, set `families: !eth0 *` in the alarm definition for `template: inbound_packets_dropped`.
+The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alarms can be annoying for some network configurations, especially for some bonding configurations where an interface is a child or a bonding interface itself. If a certain number of drops is expected on an interface for a certain network configuration, you can create a separate alarm with different triggering thresholds or disable the existing one for this specific interface. This can be done with the help of the [families](/health/REFERENCE.md#alarm-line-families) line in the alarm configuration. For example, if you want to disable the `inbound packets dropped` alarm for `eth0`, set `families: !eth0 *` in the alarm definition for `template: inbound_packets_dropped`.
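+
+In `health.d/net.conf`, that change amounts to editing the `families` line of the alarm. A minimal sketch, showing only the two relevant lines rather than the full alarm definition:
+
+```conf
+ template: inbound_packets_dropped
+ families: !eth0 *
+```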
#### configuration
@@ -436,6 +501,48 @@ and metrics:
the corresponding `min` or `empty`, which will then always read as zero.
This way, alerts which match on these will still work.
+## Infiniband interconnect
+
+This module monitors every active Infiniband port. It provides generic counter statistics and, if the vendor is supported, per-vendor hardware counters.
+
+### Monitored interface metrics
+
+Each port's counters are monitored and grouped into the following charts:
+
+- **Bandwidth usage**
+ Sent/Received data, in KB/s
+
+- **Packets Statistics**
+ Sent/Received packets, in 3 categories: total, unicast and multicast.
+
+- **Errors Statistics**
+ Many error counters are provided, presenting statistics for:
+ - Packets: malformed, sent/received packets discarded by the card/switch, missing resource
+ - Link: downed, recovered, integrity error, minor error
+ - Other events: Tick Wait to send, buffer overrun
+
+If your vendor is supported, you'll also get hardware counter statistics. These are vendor-specific, so please refer to your vendor's documentation.
+
+- Mellanox: [see statistics documentation](https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters)
+
+### Configuration
+
+The default configuration monitors only enabled Infiniband ports and checks for newly activated or created ports every 30 seconds. Each option can be overridden by uncommenting it, as shown in the example after this block.
+
+```conf
+[plugin:proc:/sys/class/infiniband]
+ # dirname to monitor = /sys/class/infiniband
+ # bandwidth counters = yes
+ # packets counters = yes
+ # errors counters = yes
+ # hardware packets counters = auto
+ # hardware errors counters = auto
+ # monitor only ports being active = auto
+ # disable by default interfaces matching =
+ # refresh ports state every seconds = 30
+```
+
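+For example, to poll for newly activated ports more often, you could set (a sketch; `10` is an arbitrary interval):
+
+```conf
+[plugin:proc:/sys/class/infiniband]
+ refresh ports state every seconds = 10
+```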
+
## IPC
### Monitored IPC metrics
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
index fcb1babcf..19230c09d 100644
--- a/collectors/proc.plugin/plugin_proc.c
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -21,6 +21,9 @@ static struct proc_module {
{ .name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg },
{ .name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail },
+ // pressure metrics
+ { .name = "/proc/pressure", .dim = "pressure", .func = do_proc_pressure },
+
// CPU metrics
{ .name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts },
{ .name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs },
@@ -36,6 +39,7 @@ static struct proc_module {
// network metrics
{ .name = "/proc/net/dev", .dim = "netdev", .func = do_proc_net_dev },
+ { .name = "/proc/net/wireless", .dim = "netwireless", .func = do_proc_net_wireless },
{ .name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat },
{ .name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6 },
{ .name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat }, // this has to be before /proc/net/snmp, because there is a shared metric
@@ -44,6 +48,7 @@ static struct proc_module {
{ .name = "/proc/net/sctp/snmp", .dim = "sctp", .func = do_proc_net_sctp_snmp },
{ .name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat },
{ .name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats },
+ { .name = "/sys/class/infiniband", .dim = "infiniband", .func = do_sys_class_infiniband },
// firewall metrics
{ .name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack },
@@ -145,7 +150,7 @@ void *proc_main(void *ptr) {
static RRDSET *st = NULL;
if(unlikely(!st)) {
- st = rrdset_find_bytype_localhost("netdata", "plugin_proc_modules");
+ st = rrdset_find_active_bytype_localhost("netdata", "plugin_proc_modules");
if(!st) {
st = rrdset_create_localhost(
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
index cb9a0c5f3..108c026ab 100644
--- a/collectors/proc.plugin/plugin_proc.h
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -25,6 +25,7 @@
extern void *proc_main(void *ptr);
extern int do_proc_net_dev(int update_every, usec_t dt);
+extern int do_proc_net_wireless(int update_every, usec_t dt);
extern int do_proc_diskstats(int update_every, usec_t dt);
extern int do_proc_mdstat(int update_every, usec_t dt);
extern int do_proc_net_snmp(int update_every, usec_t dt);
@@ -40,6 +41,7 @@ extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
extern int do_proc_interrupts(int update_every, usec_t dt);
extern int do_proc_softirqs(int update_every, usec_t dt);
+extern int do_proc_pressure(int update_every, usec_t dt);
extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
extern int do_sys_block_zram(int update_every, usec_t dt);
extern int do_proc_loadavg(int update_every, usec_t dt);
@@ -56,16 +58,19 @@ extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
extern int do_ipc(int update_every, usec_t dt);
extern int do_sys_class_power_supply(int update_every, usec_t dt);
extern int do_proc_pagetypeinfo(int update_every, usec_t dt);
+extern int do_sys_class_infiniband(int update_every, usec_t dt);
extern int get_numa_node_count(void);
// metrics that need to be shared among data collectors
extern unsigned long long tcpext_TCPSynRetrans;
// netdev renames
-extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name);
+extern void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels);
extern void netdev_rename_device_del(const char *host_device);
#include "proc_self_mountinfo.h"
+#include "proc_pressure.h"
#include "zfs_common.h"
#else // (TARGET_OS == OS_LINUX)
diff --git a/collectors/proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c
index db95b1689..8b78ecc9e 100644
--- a/collectors/proc.plugin/proc_loadavg.c
+++ b/collectors/proc.plugin/proc_loadavg.c
@@ -46,6 +46,10 @@ int do_proc_loadavg(int update_every, usec_t dt) {
//unsigned long long running_processes = str2ull(procfile_lineword(ff, 0, 3));
unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4));
+
+ //get system pid_max
+ unsigned long long max_processes = get_system_pid_max();
+ //
//unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5));
@@ -95,6 +99,7 @@ int do_proc_loadavg(int update_every, usec_t dt) {
if(likely(do_all_processes)) {
static RRDSET *processes_chart = NULL;
static RRDDIM *rd_active = NULL;
+ static RRDSETVAR *rd_pidmax;
if(unlikely(!processes_chart)) {
processes_chart = rrdset_create_localhost(
@@ -113,10 +118,12 @@ int do_proc_loadavg(int update_every, usec_t dt) {
);
rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_pidmax = rrdsetvar_custom_chart_variable_create(processes_chart, "pidmax");
}
else rrdset_next(processes_chart);
rrddim_set_by_pointer(processes_chart, rd_active, active_processes);
+ rrdsetvar_custom_chart_variable_set(rd_pidmax, max_processes);
rrdset_done(processes_chart);
}
diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c
index abfd2ff12..e932453b4 100644
--- a/collectors/proc.plugin/proc_mdstat.c
+++ b/collectors/proc.plugin/proc_mdstat.c
@@ -1,641 +1,647 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat"
-
-struct raid {
- int redundant;
- char *name;
- uint32_t hash;
-
- RRDDIM *rd_health;
- unsigned long long failed_disks;
-
- RRDSET *st_disks;
- RRDDIM *rd_down;
- RRDDIM *rd_inuse;
- unsigned long long total_disks;
- unsigned long long inuse_disks;
-
- RRDSET *st_operation;
- RRDDIM *rd_check;
- RRDDIM *rd_resync;
- RRDDIM *rd_recovery;
- RRDDIM *rd_reshape;
- unsigned long long check;
- unsigned long long resync;
- unsigned long long recovery;
- unsigned long long reshape;
-
- RRDSET *st_finish;
- RRDDIM *rd_finish_in;
- unsigned long long finish_in;
-
- RRDSET *st_speed;
- RRDDIM *rd_speed;
- unsigned long long speed;
-
- char *mismatch_cnt_filename;
- RRDSET *st_mismatch_cnt;
- RRDDIM *rd_mismatch_cnt;
- unsigned long long mismatch_cnt;
-
- RRDSET *st_nonredundant;
- RRDDIM *rd_nonredundant;
-};
-
-struct old_raid {
- int redundant;
- char *name;
- uint32_t hash;
- int found;
-};
-
-static inline char *remove_trailing_chars(char *s, char c) {
- while(*s) {
- if(unlikely(*s == c)) {
- *s = '\0';
- }
- s++;
- }
- return s;
-}
-
-static inline void make_chart_obsolete(char *name, const char *id_modifier) {
- char id[50 + 1];
- RRDSET *st = NULL;
-
- if(likely(name && id_modifier)) {
- snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier);
- st = rrdset_find_byname_localhost(id);
- if(likely(st)) rrdset_is_obsolete(st);
- }
-}
-
-int do_proc_mdstat(int update_every, usec_t dt) {
- (void)dt;
- static procfile *ff = NULL;
- static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1, do_mismatch_config = -1;
- static int make_charts_obsolete = -1;
- static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
- static struct raid *raids = NULL;
- static size_t raids_allocated = 0;
- size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
- static struct old_raid *old_raids = NULL;
- static size_t old_raids_allocated = 0;
- size_t old_raid_idx = 0;
-
- if(unlikely(do_health == -1)){
- do_health = config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES);
- do_nonredundant = config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES);
- do_mismatch_config = config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO);
- do_disks = config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES);
- do_operations = config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES);
-
- make_charts_obsolete = config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES);
-
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat");
- mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt");
- mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename);
- }
-
- if(unlikely(!ff)) {
- ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry opening it next time
-
- size_t lines = procfile_lines(ff);
- size_t words = 0;
-
- if(unlikely(lines < 2)) {
- error("Cannot read /proc/mdstat. Expected 2 or more lines, read %zu.", lines);
- return 1;
- }
-
- // find how many raids are there
- size_t l;
- raids_num = 0;
- for(l = 1; l < lines - 2 ; l++) {
- if(unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active
- raids_num++;
- }
-
- if(unlikely(!raids_num && !old_raids_allocated)) return 0; // we return 0, so that we will retry searching for raids next time
-
- // allocate the memory we need;
- if(unlikely(raids_num != raids_allocated)) {
- for(raid_idx = 0; raid_idx < raids_allocated; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
- freez(raid->name);
- freez(raid->mismatch_cnt_filename);
- }
- if(raids_num) {
- raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid));
- memset(raids, 0, raids_num * sizeof(struct raid));
- }
- else {
- freez(raids);
- raids = NULL;
- }
- raids_allocated = raids_num;
- }
-
- // loop through all lines except the first and the last ones
- for(l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) {
- struct raid *raid = &raids[raid_idx];
- raid->redundant = 0;
-
- words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) continue;
-
- if(unlikely(procfile_lineword(ff, l, 1)[0] != 'a')) continue;
- if(unlikely(!raid->name)) {
- raid->name = strdupz(procfile_lineword(ff, l, 0));
- raid->hash = simple_hash(raid->name);
- }
- else if(unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) {
- freez(raid->name);
- freez(raid->mismatch_cnt_filename);
- memset(raid, 0, sizeof(struct raid));
- raid->name = strdupz(procfile_lineword(ff, l, 0));
- raid->hash = simple_hash(raid->name);
- }
- if(unlikely(!raid->name || !raid->name[0])) continue;
- raid_idx++;
-
- // check if raid has disk status
- l++;
- words = procfile_linewords(ff, l);
- if(words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[') continue;
-
- // split inuse and total number of disks
- if(likely(do_health || do_disks)) {
- char *s = NULL, *str_total = NULL, *str_inuse = NULL;
-
- s = procfile_lineword(ff, l, words - 2);
- if(unlikely(s[0] != '[')) {
- error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket.");
- continue;
- }
- str_total = ++s;
- while(*s) {
- if(unlikely(*s == '/')) {
- *s = '\0';
- str_inuse = s + 1;
- }
- else if(unlikely(*s == ']')) {
- *s = '\0';
- break;
- }
- s++;
- }
- if(unlikely(str_total[0] == '\0' || !str_inuse || str_inuse[0] == '\0')) {
- error("Cannot read /proc/mdstat raid health status. Unexpected format.");
- continue;
- }
-
- raid->inuse_disks = str2ull(str_inuse);
- raid->total_disks = str2ull(str_total);
- raid->failed_disks = raid->total_disks - raid->inuse_disks;
- }
-
- raid->redundant = 1;
- redundant_num++;
- l++;
-
- // check if any operation is performed on the raid
- if(likely(do_operations)) {
- char *s = NULL;
-
- raid->check = 0;
- raid->resync = 0;
- raid->recovery = 0;
- raid->reshape = 0;
- raid->finish_in = 0;
- raid->speed = 0;
-
- words = procfile_linewords(ff, l);
- if(likely(words < 2)) continue;
- if(unlikely(procfile_lineword(ff, l, 0)[0] != '[')) continue;
- if(unlikely(words < 7)) {
- error("Cannot read /proc/mdstat line. Expected 7 params, read %zu.", words);
- continue;
- }
-
- char *word;
- word = procfile_lineword(ff, l, 3);
- remove_trailing_chars(word, '%');
-
- unsigned long long percentage = (unsigned long long)(str2ld(word, NULL) * 100);
- // possible operations: check, resync, recovery, reshape
- // 4-th character is unique for each operation so it is checked
- switch(procfile_lineword(ff, l, 1)[3]) {
- case 'c': // check
- raid->check = percentage;
- break;
- case 'y': // resync
- raid->resync = percentage;
- break;
- case 'o': // recovery
- raid->recovery = percentage;
- break;
- case 'h': // reshape
- raid->reshape = percentage;
- break;
- }
-
- word = procfile_lineword(ff, l, 5);
- s = remove_trailing_chars(word, 'm'); // remove trailing "min"
-
- word += 7; // skip leading "finish="
-
- if(likely(s > word))
- raid->finish_in = (unsigned long long)(str2ld(word, NULL) * 60);
-
- word = procfile_lineword(ff, l, 6);
- s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec"
-
- word += 6; // skip leading "speed="
-
- if(likely(s > word))
- raid->speed = str2ull(word);
- }
- }
-
- // read mismatch_cnt files
- if(do_mismatch == -1) {
- if(do_mismatch_config == CONFIG_BOOLEAN_AUTO) {
- if(raids_num > 50)
- do_mismatch = CONFIG_BOOLEAN_NO;
- else
- do_mismatch = CONFIG_BOOLEAN_YES;
- }
- else
- do_mismatch = do_mismatch_config;
- }
-
- if(likely(do_mismatch)) {
- for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
- char filename[FILENAME_MAX + 1];
- struct raid *raid = &raids[raid_idx];
-
- if(likely(raid->redundant)) {
- if(unlikely(!raid->mismatch_cnt_filename)) {
- snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name);
- raid->mismatch_cnt_filename = strdupz(filename);
- }
- if(unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) {
- error("Cannot read file '%s'", raid->mismatch_cnt_filename);
- do_mismatch = CONFIG_BOOLEAN_NO;
- error("Monitoring for mismatch count has been disabled");
- break;
- }
- }
- }
- }
-
- // check for disappeared raids
- for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
- int found = 0;
-
- for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
-
- if(unlikely(raid->hash == old_raid->hash
- && !strcmp(raid->name, old_raid->name)
- && raid->redundant == old_raid->redundant)) found = 1;
- }
-
- old_raid->found = found;
- }
-
- int raid_disappeared = 0;
- for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
-
- if(unlikely(!old_raid->found)) {
- if(likely(make_charts_obsolete)) {
- make_chart_obsolete(old_raid->name, "disks");
- make_chart_obsolete(old_raid->name, "mismatch");
- make_chart_obsolete(old_raid->name, "operation");
- make_chart_obsolete(old_raid->name, "finish");
- make_chart_obsolete(old_raid->name, "speed");
- make_chart_obsolete(old_raid->name, "availability");
- }
- raid_disappeared = 1;
- }
- }
-
- // allocate memory for nonredundant arrays
- if(unlikely(raid_disappeared || old_raids_allocated != raids_num)) {
- for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- freez(old_raids[old_raid_idx].name);
- }
- if(likely(raids_num)) {
- old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num);
- memset(old_raids, 0, sizeof(struct old_raid) * raids_num);
- }
- else {
- freez(old_raids);
- old_raids = NULL;
- }
- old_raids_allocated = raids_num;
- for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
- struct raid *raid = &raids[old_raid_idx];
-
- old_raid->name = strdupz(raid->name);
- old_raid->hash = raid->hash;
- old_raid->redundant = raid->redundant;
- }
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_health && redundant_num)) {
- static RRDSET *st_mdstat_health = NULL;
- if(unlikely(!st_mdstat_health)) {
- st_mdstat_health = rrdset_create_localhost(
- "mdstat"
- , "mdstat_health"
- , NULL
- , "health"
- , "md.health"
- , "Faulty Devices In MD"
- , "failed disks"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_HEALTH
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(st_mdstat_health);
- }
- else
- rrdset_next(st_mdstat_health);
-
- if(!redundant_num) {
- if(likely(make_charts_obsolete)) make_chart_obsolete("mdstat", "health");
- }
- else {
- for(raid_idx = 0; raid_idx < raids_num; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
-
- if(likely(raid->redundant)) {
- if(unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find(st_mdstat_health, raid->name))))
- raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks);
- }
- }
-
- rrdset_done(st_mdstat_health);
- }
- }
-
- // --------------------------------------------------------------------
-
- for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
- char id[50 + 1];
- char family[50 + 1];
-
- if(likely(raid->redundant)) {
- if(likely(do_disks)) {
- snprintfz(id, 50, "%s_disks", raid->name);
-
- if(unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_byname_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_disks = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.disks"
- , "Disks Stats"
- , "disks"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_isnot_obsolete(raid->st_disks);
- }
- else
- rrdset_next(raid->st_disks);
-
- if(unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find(raid->st_disks, "inuse"))))
- raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_down && !(raid->rd_down = rrddim_find(raid->st_disks, "down"))))
- raid->rd_down = rrddim_add(raid->st_disks, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks);
- rrddim_set_by_pointer(raid->st_disks, raid->rd_down, raid->failed_disks);
-
- rrdset_done(raid->st_disks);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_mismatch)) {
- snprintfz(id, 50, "%s_mismatch", raid->name);
-
- if(unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_byname_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_mismatch_cnt = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.mismatch_cnt"
- , "Mismatch Count"
- , "unsynchronized blocks"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(raid->st_mismatch_cnt);
- }
- else
- rrdset_next(raid->st_mismatch_cnt);
-
- if(unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find(raid->st_mismatch_cnt, "count"))))
- raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt);
-
- rrdset_done(raid->st_mismatch_cnt);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_operations)) {
- snprintfz(id, 50, "%s_operation", raid->name);
-
- if(unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_byname_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_operation = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.status"
- , "Current Status"
- , "percent"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(raid->st_operation);
- }
- else
- rrdset_next(raid->st_operation);
-
- if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find(raid->st_operation, "check"))))
- raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find(raid->st_operation, "resync"))))
- raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find(raid->st_operation, "recovery"))))
- raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find(raid->st_operation, "reshape"))))
- raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
-
- rrdset_done(raid->st_operation);
-
- // --------------------------------------------------------------------
-
- snprintfz(id, 50, "%s_finish", raid->name);
-
- if(unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_byname_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_finish = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.rate"
- , "Approximate Time Unit Finish"
- , "seconds"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(raid->st_finish);
- }
- else
- rrdset_next(raid->st_finish);
-
- if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find(raid->st_finish, "finish_in"))))
- raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
-
- rrdset_done(raid->st_finish);
-
- // --------------------------------------------------------------------
-
- snprintfz(id, 50, "%s_speed", raid->name);
-
- if(unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_byname_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_speed = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.rate"
- , "Operation Speed"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(raid->st_speed);
- }
- else
- rrdset_next(raid->st_speed);
-
- if(unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find(raid->st_speed, "speed"))))
- raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed);
-
- rrdset_done(raid->st_speed);
- }
- }
- else {
-
- // --------------------------------------------------------------------
-
- if(likely(do_nonredundant)) {
- snprintfz(id, 50, "%s_availability", raid->name);
-
- if(unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_localhost(id)))) {
- snprintfz(family, 50, "%s", raid->name);
-
- raid->st_nonredundant = rrdset_create_localhost(
- "mdstat"
- , id
- , NULL
- , family
- , "md.nonredundant"
- , "Nonredundant Array Availability"
- , "boolean"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MDSTAT_NAME
- , NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_isnot_obsolete(raid->st_nonredundant);
- }
- else
- rrdset_next(raid->st_nonredundant);
-
- if(unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find(raid->st_nonredundant, "available"))))
- raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1);
-
- rrdset_done(raid->st_nonredundant);
- }
- }
- }
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat"
+
+struct raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+
+ RRDDIM *rd_health;
+ unsigned long long failed_disks;
+
+ RRDSET *st_disks;
+ RRDDIM *rd_down;
+ RRDDIM *rd_inuse;
+ unsigned long long total_disks;
+ unsigned long long inuse_disks;
+
+ RRDSET *st_operation;
+ RRDDIM *rd_check;
+ RRDDIM *rd_resync;
+ RRDDIM *rd_recovery;
+ RRDDIM *rd_reshape;
+ unsigned long long check;
+ unsigned long long resync;
+ unsigned long long recovery;
+ unsigned long long reshape;
+
+ RRDSET *st_finish;
+ RRDDIM *rd_finish_in;
+ unsigned long long finish_in;
+
+ RRDSET *st_speed;
+ RRDDIM *rd_speed;
+ unsigned long long speed;
+
+ char *mismatch_cnt_filename;
+ RRDSET *st_mismatch_cnt;
+ RRDDIM *rd_mismatch_cnt;
+ unsigned long long mismatch_cnt;
+
+ RRDSET *st_nonredundant;
+ RRDDIM *rd_nonredundant;
+};
+
+struct old_raid {
+ int redundant;
+ char *name;
+ uint32_t hash;
+ int found;
+};
+
+static inline char *remove_trailing_chars(char *s, char c)
+{
+ while (*s) {
+ if (unlikely(*s == c)) {
+ *s = '\0';
+ }
+ s++;
+ }
+ return s;
+}
+
+static inline void make_chart_obsolete(char *name, const char *id_modifier)
+{
+ char id[50 + 1];
+ RRDSET *st = NULL;
+
+ if (likely(name && id_modifier)) {
+ snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier);
+ st = rrdset_find_active_byname_localhost(id);
+ if (likely(st))
+ rrdset_is_obsolete(st);
+ }
+}
+
+int do_proc_mdstat(int update_every, usec_t dt)
+{
+ (void)dt;
+ static procfile *ff = NULL;
+ static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1,
+ do_mismatch_config = -1;
+ static int make_charts_obsolete = -1;
+ static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
+ static struct raid *raids = NULL;
+ static size_t raids_allocated = 0;
+ size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
+ static struct old_raid *old_raids = NULL;
+ static size_t old_raids_allocated = 0;
+ size_t old_raid_idx = 0;
+
+ if (unlikely(do_health == -1)) {
+ do_health =
+ config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES);
+ do_nonredundant =
+ config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES);
+ do_mismatch_config =
+ config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO);
+ do_disks =
+ config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES);
+ do_operations =
+ config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES);
+
+ make_charts_obsolete =
+ config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES);
+
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat");
+ mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt");
+ mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename);
+ }
+
+ if (unlikely(!ff)) {
+ ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff))
+ return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff))
+ return 0; // we return 0, so that we will retry opening it next time
+
+ size_t lines = procfile_lines(ff);
+ size_t words = 0;
+
+ if (unlikely(lines < 2)) {
+ error("Cannot read /proc/mdstat. Expected 2 or more lines, read %zu.", lines);
+ return 1;
+ }
+
+ // find how many raids are there
+ size_t l;
+ raids_num = 0;
+ for (l = 1; l < lines - 2; l++) {
+ if (unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active
+ raids_num++;
+ }
+
+ if (unlikely(!raids_num && !old_raids_allocated))
+ return 0; // we return 0, so that we will retry searching for raids next time
+
+ // allocate the memory we need;
+ if (unlikely(raids_num != raids_allocated)) {
+ for (raid_idx = 0; raid_idx < raids_allocated; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ }
+ if (raids_num) {
+ raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid));
+ memset(raids, 0, raids_num * sizeof(struct raid));
+ } else {
+ freez(raids);
+ raids = NULL;
+ }
+ raids_allocated = raids_num;
+ }
+
+ // loop through all lines except the first and the last ones
+ for (l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) {
+ struct raid *raid = &raids[raid_idx];
+ raid->redundant = 0;
+
+ words = procfile_linewords(ff, l);
+
+ if (unlikely(words < 2))
+ continue;
+
+ if (unlikely(procfile_lineword(ff, l, 1)[0] != 'a'))
+ continue;
+
+ if (unlikely(!raid->name)) {
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ } else if (unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) {
+ freez(raid->name);
+ freez(raid->mismatch_cnt_filename);
+ memset(raid, 0, sizeof(struct raid));
+ raid->name = strdupz(procfile_lineword(ff, l, 0));
+ raid->hash = simple_hash(raid->name);
+ }
+
+ if (unlikely(!raid->name || !raid->name[0]))
+ continue;
+
+ raid_idx++;
+
+ // check if raid has disk status
+ l++;
+ words = procfile_linewords(ff, l);
+ if (words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[')
+ continue;
+
+ // split inuse and total number of disks
+ if (likely(do_health || do_disks)) {
+ char *s = NULL, *str_total = NULL, *str_inuse = NULL;
+
+ s = procfile_lineword(ff, l, words - 2);
+ if (unlikely(s[0] != '[')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket.");
+ continue;
+ }
+ str_total = ++s;
+ while (*s) {
+ if (unlikely(*s == '/')) {
+ *s = '\0';
+ str_inuse = s + 1;
+ } else if (unlikely(*s == ']')) {
+ *s = '\0';
+ break;
+ }
+ s++;
+ }
+ if (unlikely(str_total[0] == '\0' || !str_inuse || str_inuse[0] == '\0')) {
+ error("Cannot read /proc/mdstat raid health status. Unexpected format.");
+ continue;
+ }
+
+ raid->inuse_disks = str2ull(str_inuse);
+ raid->total_disks = str2ull(str_total);
+ raid->failed_disks = raid->total_disks - raid->inuse_disks;
+ }
+
+ raid->redundant = 1;
+ redundant_num++;
+ l++;
+
+ // check if any operation is performed on the raid
+ if (likely(do_operations)) {
+ char *s = NULL;
+
+ raid->check = 0;
+ raid->resync = 0;
+ raid->recovery = 0;
+ raid->reshape = 0;
+ raid->finish_in = 0;
+ raid->speed = 0;
+
+ words = procfile_linewords(ff, l);
+
+ if (likely(words < 2))
+ continue;
+
+ if (unlikely(procfile_lineword(ff, l, 0)[0] != '['))
+ continue;
+
+ if (unlikely(words < 7)) {
+ error("Cannot read /proc/mdstat line. Expected 7 params, read %zu.", words);
+ continue;
+ }
+
+ char *word;
+ word = procfile_lineword(ff, l, 3);
+ remove_trailing_chars(word, '%');
+
+ unsigned long long percentage = (unsigned long long)(str2ld(word, NULL) * 100);
+ // possible operations: check, resync, recovery, reshape
+ // 4-th character is unique for each operation so it is checked
+ switch (procfile_lineword(ff, l, 1)[3]) {
+ case 'c': // check
+ raid->check = percentage;
+ break;
+ case 'y': // resync
+ raid->resync = percentage;
+ break;
+ case 'o': // recovery
+ raid->recovery = percentage;
+ break;
+ case 'h': // reshape
+ raid->reshape = percentage;
+ break;
+ }
+
+ word = procfile_lineword(ff, l, 5);
+ s = remove_trailing_chars(word, 'm'); // remove trailing "min"
+
+ word += 7; // skip leading "finish="
+
+ if (likely(s > word))
+ raid->finish_in = (unsigned long long)(str2ld(word, NULL) * 60);
+
+ word = procfile_lineword(ff, l, 6);
+ s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec"
+
+ word += 6; // skip leading "speed="
+
+ if (likely(s > word))
+ raid->speed = str2ull(word);
+ }
+ }
+
+ // read mismatch_cnt files
+ if (do_mismatch == -1) {
+ if (do_mismatch_config == CONFIG_BOOLEAN_AUTO) {
+ if (raids_num > 50)
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ else
+ do_mismatch = CONFIG_BOOLEAN_YES;
+ } else
+ do_mismatch = do_mismatch_config;
+ }
+
+ if (likely(do_mismatch)) {
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ char filename[FILENAME_MAX + 1];
+ struct raid *raid = &raids[raid_idx];
+
+ if (likely(raid->redundant)) {
+ if (unlikely(!raid->mismatch_cnt_filename)) {
+ snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name);
+ raid->mismatch_cnt_filename = strdupz(filename);
+ }
+ if (unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) {
+ error("Cannot read file '%s'", raid->mismatch_cnt_filename);
+ do_mismatch = CONFIG_BOOLEAN_NO;
+ error("Monitoring for mismatch count has been disabled");
+ break;
+ }
+ }
+ }
+ }
+
+ // check for disappeared raids
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ int found = 0;
+
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if (unlikely(
+ raid->hash == old_raid->hash && !strcmp(raid->name, old_raid->name) &&
+ raid->redundant == old_raid->redundant))
+ found = 1;
+ }
+
+ old_raid->found = found;
+ }
+
+ int raid_disappeared = 0;
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+
+ if (unlikely(!old_raid->found)) {
+ if (likely(make_charts_obsolete)) {
+ make_chart_obsolete(old_raid->name, "disks");
+ make_chart_obsolete(old_raid->name, "mismatch");
+ make_chart_obsolete(old_raid->name, "operation");
+ make_chart_obsolete(old_raid->name, "finish");
+ make_chart_obsolete(old_raid->name, "speed");
+ make_chart_obsolete(old_raid->name, "availability");
+ }
+ raid_disappeared = 1;
+ }
+ }
+
+ // allocate memory for nonredundant arrays
+ if (unlikely(raid_disappeared || old_raids_allocated != raids_num)) {
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ freez(old_raids[old_raid_idx].name);
+ }
+ if (likely(raids_num)) {
+ old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num);
+ memset(old_raids, 0, sizeof(struct old_raid) * raids_num);
+ } else {
+ freez(old_raids);
+ old_raids = NULL;
+ }
+ old_raids_allocated = raids_num;
+ for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
+ struct old_raid *old_raid = &old_raids[old_raid_idx];
+ struct raid *raid = &raids[old_raid_idx];
+
+ old_raid->name = strdupz(raid->name);
+ old_raid->hash = raid->hash;
+ old_raid->redundant = raid->redundant;
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_health && redundant_num)) {
+ static RRDSET *st_mdstat_health = NULL;
+ if (unlikely(!st_mdstat_health)) {
+ st_mdstat_health = rrdset_create_localhost(
+ "mdstat",
+ "mdstat_health",
+ NULL,
+ "health",
+ "md.health",
+ "Faulty Devices In MD",
+ "failed disks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_HEALTH,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(st_mdstat_health);
+ } else
+ rrdset_next(st_mdstat_health);
+
+ if (!redundant_num) {
+ if (likely(make_charts_obsolete))
+ make_chart_obsolete("mdstat", "health");
+ } else {
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+
+ if (likely(raid->redundant)) {
+ if (unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find_active(st_mdstat_health, raid->name))))
+ raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks);
+ }
+ }
+
+ rrdset_done(st_mdstat_health);
+ }
+ }
+
+ // --------------------------------------------------------------------
+
+ for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
+ struct raid *raid = &raids[raid_idx];
+ char id[50 + 1];
+ char family[50 + 1];
+
+ if (likely(raid->redundant)) {
+ if (likely(do_disks)) {
+ snprintfz(id, 50, "%s_disks", raid->name);
+
+ if (unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_disks = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.disks",
+ "Disks Stats",
+ "disks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ rrdset_isnot_obsolete(raid->st_disks);
+ } else
+ rrdset_next(raid->st_disks);
+
+ if (unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find_active(raid->st_disks, "inuse"))))
+ raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ if (unlikely(!raid->rd_down && !(raid->rd_down = rrddim_find_active(raid->st_disks, "down"))))
+ raid->rd_down = rrddim_add(raid->st_disks, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks);
+ rrddim_set_by_pointer(raid->st_disks, raid->rd_down, raid->failed_disks);
+
+ rrdset_done(raid->st_disks);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_mismatch)) {
+ snprintfz(id, 50, "%s_mismatch", raid->name);
+
+ if (unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_mismatch_cnt = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.mismatch_cnt",
+ "Mismatch Count",
+ "unsynchronized blocks",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_mismatch_cnt);
+ } else
+ rrdset_next(raid->st_mismatch_cnt);
+
+ if (unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find_active(raid->st_mismatch_cnt, "count"))))
+ raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt);
+
+ rrdset_done(raid->st_mismatch_cnt);
+ }
+
+ // --------------------------------------------------------------------
+
+ if (likely(do_operations)) {
+ snprintfz(id, 50, "%s_operation", raid->name);
+
+ if (unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_operation = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.status",
+ "Current Status",
+ "percent",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_operation);
+ } else
+ rrdset_next(raid->st_operation);
+
+ if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find_active(raid->st_operation, "check"))))
+ raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find_active(raid->st_operation, "resync"))))
+ raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find_active(raid->st_operation, "recovery"))))
+ raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find_active(raid->st_operation, "reshape"))))
+ raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery);
+ rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
+
+ rrdset_done(raid->st_operation);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_finish", raid->name);
+
+ if (unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_finish = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.rate",
+ "Approximate Time Unit Finish",
+ "seconds",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10,
+ update_every, RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_finish);
+ } else
+ rrdset_next(raid->st_finish);
+
+ if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find_active(raid->st_finish, "finish_in"))))
+ raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
+
+ rrdset_done(raid->st_finish);
+
+ // --------------------------------------------------------------------
+
+ snprintfz(id, 50, "%s_speed", raid->name);
+
+ if (unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_active_byname_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_speed = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.rate",
+ "Operation Speed",
+ "KiB/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_speed);
+ } else
+ rrdset_next(raid->st_speed);
+
+ if (unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find_active(raid->st_speed, "speed"))))
+ raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed);
+
+ rrdset_done(raid->st_speed);
+ }
+ } else {
+ // --------------------------------------------------------------------
+
+ if (likely(do_nonredundant)) {
+ snprintfz(id, 50, "%s_availability", raid->name);
+
+ if (unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_active_localhost(id)))) {
+ snprintfz(family, 50, "%s", raid->name);
+
+ raid->st_nonredundant = rrdset_create_localhost(
+ "mdstat",
+ id,
+ NULL,
+ family,
+ "md.nonredundant",
+ "Nonredundant Array Availability",
+ "boolean",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_MDSTAT_NAME,
+ NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_isnot_obsolete(raid->st_nonredundant);
+ } else
+ rrdset_next(raid->st_nonredundant);
+
+ if (unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find_active(raid->st_nonredundant, "available"))))
+ raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1);
+
+ rrdset_done(raid->st_nonredundant);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
index 92135393d..51d77fe0b 100644
--- a/collectors/proc.plugin/proc_meminfo.c
+++ b/collectors/proc.plugin/proc_meminfo.c
@@ -35,7 +35,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
Writeback = 0,
//AnonPages = 0,
//Mapped = 0,
- //Shmem = 0,
+ Shmem = 0,
Slab = 0,
SReclaimable = 0,
SUnreclaim = 0,
@@ -92,7 +92,7 @@ int do_proc_meminfo(int update_every, usec_t dt) {
arl_expect(arl_base, "Writeback", &Writeback);
//arl_expect(arl_base, "AnonPages", &AnonPages);
//arl_expect(arl_base, "Mapped", &Mapped);
- //arl_expect(arl_base, "Shmem", &Shmem);
+ arl_expect(arl_base, "Shmem", &Shmem);
arl_expect(arl_base, "Slab", &Slab);
arl_expect(arl_base, "SReclaimable", &SReclaimable);
arl_expect(arl_base, "SUnreclaim", &SUnreclaim);
@@ -145,8 +145,8 @@ int do_proc_meminfo(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- // http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux
- unsigned long long MemCached = Cached + SReclaimable;
+ // http://calimeroteknik.free.fr/blag/?article20/really-used-memory-on-gnu-linux
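+ // the kernel reports tmpfs/shmem pages inside Cached, but they are not reclaimable page cache, so subtract them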
+ unsigned long long MemCached = Cached + SReclaimable - Shmem;
unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers;
if(do_ram) {
@@ -526,4 +526,3 @@ int do_proc_meminfo(int update_every, usec_t dt) {
return 0;
}
-
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
index 8d9751d1c..a90e3c3ee 100644
--- a/collectors/proc.plugin/proc_net_dev.c
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -5,6 +5,21 @@
#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev"
#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME
+// As defined in https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-net
+const char *operstate_names[] = { "unknown", "notpresent", "down", "lowerlayerdown", "testing", "dormant", "up" };
+
+static inline int get_operstate(char *operstate)
+{
+ int i;
+
+ for (i = 0; i < (int) (sizeof(operstate_names) / sizeof(char *)); i++) {
+ if (!strcmp(operstate, operstate_names[i])) {
+ return i;
+ }
+ }
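+ // not found: fall back to index 0, i.e. "unknown"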
+ return 0;
+}
+
// ----------------------------------------------------------------------------
// netdev list
@@ -45,6 +60,8 @@ static struct netdev {
const char *chart_family;
+ struct label *chart_labels;
+
int flipped;
unsigned long priority;
@@ -67,6 +84,8 @@ static struct netdev {
kernel_uint_t tcarrier;
kernel_uint_t tcompressed;
kernel_uint_t speed;
+ kernel_uint_t duplex;
+ kernel_uint_t operstate;
// charts
RRDSET *st_bandwidth;
@@ -97,9 +116,18 @@ static struct netdev {
RRDDIM *rd_tcompressed;
usec_t speed_last_collected_usec;
+ usec_t duplex_last_collected_usec;
+ usec_t operstate_last_collected_usec;
+
char *filename_speed;
RRDSETVAR *chart_var_speed;
+ char *filename_duplex;
+ RRDSETVAR *chart_var_duplex;
+
+ char *filename_operstate;
+ RRDSETVAR *chart_var_operstate;
+
struct netdev *next;
} *netdev_root = NULL, *netdev_last_used = NULL;
@@ -166,14 +194,16 @@ static void netdev_free_chart_strings(struct netdev *d) {
static void netdev_free(struct netdev *d) {
netdev_charts_release(d);
netdev_free_chart_strings(d);
+ free_label_list(d->chart_labels);
freez((void *)d->name);
freez((void *)d->filename_speed);
+ freez((void *)d->filename_duplex);
+ freez((void *)d->filename_operstate);
freez((void *)d);
netdev_added--;
}
-
// ----------------------------------------------------------------------------
// netdev renames
@@ -184,6 +214,8 @@ static struct netdev_rename {
const char *container_device;
const char *container_name;
+ struct label *chart_labels;
+
int processed;
struct netdev_rename *next;
@@ -203,7 +235,9 @@ static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_
}
// other threads can call this function to register a rename to a netdev
-void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name) {
+void netdev_rename_device_add(
+ const char *host_device, const char *container_device, const char *container_name, struct label *labels)
+{
netdata_mutex_lock(&netdev_rename_mutex);
uint32_t hash = simple_hash(host_device);
@@ -213,6 +247,7 @@ void netdev_rename_device_add(const char *host_device, const char *container_dev
r->host_device = strdupz(host_device);
r->container_device = strdupz(container_device);
r->container_name = strdupz(container_name);
+ update_label_list(&r->chart_labels, labels);
r->hash = hash;
r->next = netdev_rename_root;
r->processed = 0;
@@ -227,6 +262,9 @@ void netdev_rename_device_add(const char *host_device, const char *container_dev
r->container_device = strdupz(container_device);
r->container_name = strdupz(container_name);
+
+ update_label_list(&r->chart_labels, labels);
+
r->processed = 0;
netdev_pending_renames++;
info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
@@ -258,6 +296,7 @@ void netdev_rename_device_del(const char *host_device) {
freez((void *) r->host_device);
freez((void *) r->container_name);
freez((void *) r->container_device);
+ free_label_list(r->chart_labels);
freez((void *) r);
break;
}
@@ -307,6 +346,8 @@ static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *
snprintfz(buffer, RRD_ID_LENGTH_MAX, "net %s", r->container_device);
d->chart_family = strdupz(buffer);
+ update_label_list(&d->chart_labels, r->chart_labels);
+
d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE;
d->flipped = 1;
}
@@ -439,9 +480,15 @@ int do_proc_net_dev(int update_every, usec_t dt) {
static SIMPLE_PATTERN *disabled_list = NULL;
static procfile *ff = NULL;
static int enable_new_interfaces = -1;
- static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1, do_events = -1;
- static char *path_to_sys_devices_virtual_net = NULL, *path_to_sys_class_net_speed = NULL, *proc_net_dev_filename = NULL;
+ static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1,
+ do_events = -1;
+ static char *path_to_sys_devices_virtual_net = NULL, *path_to_sys_class_net_speed = NULL,
+ *proc_net_dev_filename = NULL;
+ static char *path_to_sys_class_net_duplex = NULL;
+ static char *path_to_sys_class_net_operstate = NULL;
static long long int dt_to_refresh_speed = 0;
+ static long long int dt_to_refresh_duplex = 0;
+ static long long int dt_to_refresh_operstate = 0;
if(unlikely(enable_new_interfaces == -1)) {
char filename[FILENAME_MAX + 1];
@@ -455,6 +502,13 @@ int do_proc_net_dev(int update_every, usec_t dt) {
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/speed");
path_to_sys_class_net_speed = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device speed", filename);
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/duplex");
+ path_to_sys_class_net_duplex = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device duplex", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/operstate");
+ path_to_sys_class_net_operstate = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device operstate", filename);
+
+
enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO);
do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "bandwidth for all interfaces", CONFIG_BOOLEAN_AUTO);
@@ -468,7 +522,15 @@ int do_proc_net_dev(int update_every, usec_t dt) {
disabled_list = simple_pattern_create(config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "disable by default interfaces matching", "lo fireqos* *-ifb"), NULL, SIMPLE_PATTERN_EXACT);
dt_to_refresh_speed = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface speed every seconds", 10) * USEC_PER_SEC;
- if(dt_to_refresh_speed < 0) dt_to_refresh_speed = 0;
+ dt_to_refresh_duplex = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface duplex every seconds", 10) * USEC_PER_SEC;
+ dt_to_refresh_operstate = config_get_number(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "refresh interface operstate every seconds", 10) * USEC_PER_SEC;
+
+ if (dt_to_refresh_operstate < 0)
+ dt_to_refresh_operstate = 0;
+ if (dt_to_refresh_duplex < 0)
+ dt_to_refresh_duplex = 0;
+ if (dt_to_refresh_speed < 0)
+ dt_to_refresh_speed = 0;
}
if(unlikely(!ff)) {
@@ -525,6 +587,12 @@ int do_proc_net_dev(int update_every, usec_t dt) {
// set the filename to get the interface speed
snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_speed, d->name);
d->filename_speed = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_duplex, d->name);
+ d->filename_duplex = strdupz(buffer);
+
+ snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_operstate, d->name);
+ d->filename_operstate = strdupz(buffer);
}
snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
@@ -623,6 +691,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
+ rrdset_update_labels(d->st_bandwidth, d->chart_labels);
+
d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
@@ -668,6 +738,76 @@ int do_proc_net_dev(int update_every, usec_t dt) {
}
}
}
+
+ if (d->filename_duplex) {
+ d->duplex_last_collected_usec += dt;
+
+ if (unlikely(d->duplex_last_collected_usec >= (usec_t)dt_to_refresh_duplex)) {
+ if (unlikely(!d->chart_var_duplex)) {
+ d->chart_var_duplex = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "duplex");
+ if (!d->chart_var_duplex) {
+ error("Cannot create interface %s chart variable 'duplex'. Will not update the duplex status anymore.", d->name);
+ freez(d->filename_duplex);
+ d->filename_duplex = NULL;
+ }
+ }
+
+ if (d->filename_duplex && d->chart_var_duplex) {
+ char buffer[32 + 1];
+
+ if (read_file(d->filename_duplex, buffer, 32)) {
+ error("Cannot refresh interface %s duplex state by reading '%s'. I will stop updating it.", d->name, d->filename_duplex);
+ freez(d->filename_duplex);
+ d->filename_duplex = NULL;
+ } else {
+ // values can be unknown, half or full -- checking the first letter is enough (0 = unknown, 1 = half, 2 = full)
+ if (buffer[0] == 'f')
+ d->duplex = 2;
+ else if (buffer[0] == 'h')
+ d->duplex = 1;
+ else
+ d->duplex = 0;
+
+ rrdsetvar_custom_chart_variable_set(d->chart_var_duplex, (calculated_number)d->duplex);
+ d->duplex_last_collected_usec = 0;
+ }
+ }
+ }
+ }
+
+ if (d->filename_operstate) {
+ d->operstate_last_collected_usec += dt;
+
+ if (unlikely(d->operstate_last_collected_usec >= (usec_t)dt_to_refresh_operstate)) {
+ if (unlikely(!d->chart_var_operstate)) {
+ d->chart_var_operstate = rrdsetvar_custom_chart_variable_create(d->st_bandwidth, "operstate");
+ if (!d->chart_var_operstate) {
+ error(
+ "Cannot create interface %s chart variable 'operstate'. I will stop updating it.",
+ d->name);
+ freez(d->filename_operstate);
+ d->filename_operstate = NULL;
+ }
+ }
+
+ if (d->filename_operstate && d->chart_var_operstate) {
+ char buffer[32 + 1], *trimmed_buffer;
+
+ if (read_file(d->filename_operstate, buffer, 32)) {
+ error(
+ "Cannot refresh %s operstate by reading '%s'. Will not update its status anymore.",
+ d->name, d->filename_operstate);
+ freez(d->filename_operstate);
+ d->filename_operstate = NULL;
+ } else {
+ trimmed_buffer = trim(buffer);
+ d->operstate = get_operstate(trimmed_buffer);
+ rrdsetvar_custom_chart_variable_set(d->chart_var_operstate, (calculated_number)d->operstate);
+ d->operstate_last_collected_usec = 0;
+ }
+ }
+ }
+ }
}
// --------------------------------------------------------------------
@@ -696,6 +836,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_packets, d->chart_labels);
+
d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -742,6 +884,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_errors, d->chart_labels);
+
d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -786,6 +930,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_drops, d->chart_labels);
+
d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -830,6 +976,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_fifo, d->chart_labels);
+
d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -874,6 +1022,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_compressed, d->chart_labels);
+
d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -918,6 +1068,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
+ rrdset_update_labels(d->st_events, d->chart_labels);
+
d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
index 7ec783e77..a29ccccd1 100644
--- a/collectors/proc.plugin/proc_net_softnet_stat.c
+++ b/collectors/proc.plugin/proc_net_softnet_stat.c
@@ -81,7 +81,7 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) {
// --------------------------------------------------------------------
- st = rrdset_find_bytype_localhost("system", "softnet_stat");
+ st = rrdset_find_active_bytype_localhost("system", "softnet_stat");
if(unlikely(!st)) {
st = rrdset_create_localhost(
"system"
@@ -114,7 +114,7 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) {
char id[50+1];
snprintfz(id, 50, "cpu%zu_softnet_stat", l);
- st = rrdset_find_bytype_localhost("cpu", id);
+ st = rrdset_find_active_bytype_localhost("cpu", id);
if(unlikely(!st)) {
char title[100+1];
snprintfz(title, 100, "CPU%zu softnet_stat", l);
diff --git a/collectors/proc.plugin/proc_net_wireless.c b/collectors/proc.plugin/proc_net_wireless.c
new file mode 100644
index 000000000..32a53c68f
--- /dev/null
+++ b/collectors/proc.plugin/proc_net_wireless.c
@@ -0,0 +1,453 @@
+#include <stdbool.h>
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_NETWIRELESS_NAME "/proc/net/wireless"
+
+#define CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETWIRELESS_NAME
+
+
+static struct netwireless {
+ char *name;
+ uint32_t hash;
+
+ //flags
+ bool configured;
+ struct timeval updated;
+
+ int do_status;
+ int do_quality;
+ int do_discarded_packets;
+ int do_missed_beacon;
+
+ // Data collected
+ // status
+ kernel_uint_t status;
+
+ // Quality
+ calculated_number link;
+ calculated_number level;
+ calculated_number noise;
+
+ // Discarded packets
+ kernel_uint_t nwid;
+ kernel_uint_t crypt;
+ kernel_uint_t frag;
+ kernel_uint_t retry;
+ kernel_uint_t misc;
+
+ // missed beacon
+ kernel_uint_t missed_beacon;
+
+ const char *chart_id_net_status;
+ const char *chart_id_net_link;
+ const char *chart_id_net_level;
+ const char *chart_id_net_noise;
+ const char *chart_id_net_discarded_packets;
+ const char *chart_id_net_missed_beacon;
+
+ const char *chart_family;
+
+ // charts
+ // status
+ RRDSET *st_status;
+
+ // Quality
+ RRDSET *st_link;
+ RRDSET *st_level;
+ RRDSET *st_noise;
+
+ // Discarded Packets
+ RRDSET *st_discarded_packets;
+ // Missed beacon
+ RRDSET *st_missed_beacon;
+
+ // Dimensions
+ // status
+ RRDDIM *rd_status;
+
+ // Quality
+ RRDDIM *rd_link;
+ RRDDIM *rd_level;
+ RRDDIM *rd_noise;
+
+ // Discarded packets
+ RRDDIM *rd_nwid;
+ RRDDIM *rd_crypt;
+ RRDDIM *rd_frag;
+ RRDDIM *rd_retry;
+ RRDDIM *rd_misc;
+
+ // missed beacon
+ RRDDIM *rd_missed_beacon;
+
+ struct netwireless *next;
+} *netwireless_root = NULL;
+
+static void netwireless_free_st(struct netwireless *wireless_dev)
+{
+ if (wireless_dev->st_status) rrdset_is_obsolete(wireless_dev->st_status);
+ if (wireless_dev->st_link) rrdset_is_obsolete(wireless_dev->st_link);
+ if (wireless_dev->st_level) rrdset_is_obsolete(wireless_dev->st_level);
+ if (wireless_dev->st_noise) rrdset_is_obsolete(wireless_dev->st_noise);
+ if (wireless_dev->st_discarded_packets) rrdset_is_obsolete(wireless_dev->st_discarded_packets);
+ if (wireless_dev->st_missed_beacon) rrdset_is_obsolete(wireless_dev->st_missed_beacon);
+
+ wireless_dev->st_status = NULL;
+ wireless_dev->st_link = NULL;
+ wireless_dev->st_level = NULL;
+ wireless_dev->st_noise = NULL;
+ wireless_dev->st_discarded_packets = NULL;
+ wireless_dev->st_missed_beacon = NULL;
+}
+
+static void netwireless_free(struct netwireless *wireless_dev)
+{
+ wireless_dev->next = NULL;
+ freez((void *)wireless_dev->name);
+ netwireless_free_st(wireless_dev);
+ freez((void *)wireless_dev->chart_id_net_status);
+ freez((void *)wireless_dev->chart_id_net_link);
+ freez((void *)wireless_dev->chart_id_net_level);
+ freez((void *)wireless_dev->chart_id_net_noise);
+ freez((void *)wireless_dev->chart_id_net_discarded_packets);
+ freez((void *)wireless_dev->chart_id_net_missed_beacon);
+
+ freez((void *)wireless_dev);
+}
+
+static void netwireless_cleanup(struct timeval *timestamp)
+{
+ struct netwireless *previous = NULL;
+ struct netwireless *current;
+ // walk the list from beginning to end, freeing devices that were not updated in this pass
+ for (current = netwireless_root; current;) {
+
+ if (timercmp(&current->updated, timestamp, <)) {
+ struct netwireless *to_free = current;
+ current = current->next;
+ netwireless_free(to_free);
+
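+ // re-link the list around the freed node: fix the predecessor's next pointer, or move the head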
+ if (previous) {
+ previous->next = current;
+ } else {
+ netwireless_root = current;
+ }
+ } else {
+ previous = current;
+ current = current->next;
+ }
+ }
+}
+
+// finds an existing interface or creates a new entry
+static struct netwireless *find_or_create_wireless(const char *name)
+{
+ struct netwireless *wireless;
+ uint32_t hash = simple_hash(name);
+
+ // search it, from beginning to the end
+ for (wireless = netwireless_root ; wireless ; wireless = wireless->next) {
+ if (unlikely(hash == wireless->hash && !strcmp(name, wireless->name))) {
+ return wireless;
+ }
+ }
+
+ // create a new one
+ wireless = callocz(1, sizeof(struct netwireless));
+ wireless->name = strdupz(name);
+ wireless->hash = hash;
+
+ // link it to the end
+ if (netwireless_root) {
+ struct netwireless *last_node;
+ for (last_node = netwireless_root; last_node->next ; last_node = last_node->next);
+
+ last_node->next = wireless;
+ } else
+ netwireless_root = wireless;
+
+ return wireless;
+}
+
+static void configure_device(int do_status, int do_quality, int do_discarded_packets, int do_missed,
+ struct netwireless *wireless_dev) {
+ wireless_dev->do_status = do_status;
+ wireless_dev->do_quality = do_quality;
+ wireless_dev->do_discarded_packets = do_discarded_packets;
+ wireless_dev->do_missed_beacon = do_missed;
+ wireless_dev->configured = true;
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_status", wireless_dev->name);
+ wireless_dev->chart_id_net_status = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_link_quality", wireless_dev->name);
+ wireless_dev->chart_id_net_link = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_signal_level", wireless_dev->name);
+ wireless_dev->chart_id_net_level = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_noise_level", wireless_dev->name);
+ wireless_dev->chart_id_net_noise = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_discarded_packets", wireless_dev->name);
+ wireless_dev->chart_id_net_discarded_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_missed_beacon", wireless_dev->name);
+ wireless_dev->chart_id_net_missed_beacon = strdupz(buffer);
+}
+
+int do_proc_net_wireless(int update_every, usec_t dt)
+{
+ UNUSED(dt);
+ static procfile *ff = NULL;
+ static int do_status, do_quality = -1, do_discarded_packets, do_beacon;
+ static char *proc_net_wireless_filename = NULL;
+
+ if (unlikely(do_quality == -1)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/wireless");
+
+ proc_net_wireless_filename = config_get(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,"filename to monitor",
+ filename);
+
+ do_status = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "status for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ do_quality = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "quality for all interfaces", CONFIG_BOOLEAN_AUTO);
+
+ do_discarded_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "discarded packets for all interfaces",
+ CONFIG_BOOLEAN_AUTO);
+
+ do_beacon = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS,
+ "missed beacon for all interface", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if (unlikely(!ff)) {
+ ff = procfile_open(proc_net_wireless_filename, " \t,|", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff)) return 1;
+
+ size_t lines = procfile_lines(ff);
+ struct timeval timestamp;
+ size_t l;
+ gettimeofday(&timestamp, NULL);
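+ // the first two lines of /proc/net/wireless are column headers, so parsing starts at line 2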
+ for (l = 2; l < lines; l++) {
+ if (unlikely(procfile_linewords(ff, l) < 11)) continue;
+
+ char *name = procfile_lineword(ff, l, 0);
+ size_t len = strlen(name);
+ if (name[len - 1] == ':') name[len - 1] = '\0';
+
+ struct netwireless *wireless_dev = find_or_create_wireless(name);
+
+ if (unlikely(!wireless_dev->configured)) {
+ configure_device(do_status, do_quality, do_discarded_packets, do_beacon, wireless_dev);
+ }
+
+ if (likely(do_status != CONFIG_BOOLEAN_NO)) {
+ wireless_dev->status = str2kernel_uint_t(procfile_lineword(ff, l, 1));
+
+ if (unlikely(!wireless_dev->st_status)) {
+ wireless_dev->st_status = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_status,
+ NULL,
+ wireless_dev->name,
+ "wireless.status",
+ "Internal status reported by interface.",
+ "status",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_status, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_status = rrddim_add(wireless_dev->st_status, "status", NULL, 1,
+ 1, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_status);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_status, wireless_dev->rd_status,
+ (collected_number)wireless_dev->status);
+ rrdset_done(wireless_dev->st_status);
+ }
+
+ if (likely(do_quality != CONFIG_BOOLEAN_NO)) {
+ wireless_dev->link = str2ld(procfile_lineword(ff, l, 2), NULL);
+ wireless_dev->level = str2ld(procfile_lineword(ff, l, 3), NULL);
+ wireless_dev->noise = str2ld(procfile_lineword(ff, l, 4), NULL);
+
+ if (unlikely(!wireless_dev->st_link)) {
+ wireless_dev->st_link = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_link,
+ NULL,
+ wireless_dev->name,
+ "wireless.link_quality",
+ "Overall quality of the link. This is an aggregate value, and depends on the driver and hardware.",
+ "value",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 1,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_link, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_link = rrddim_add(wireless_dev->st_link, "link_quality", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_link);
+ }
+
+ if (unlikely(!wireless_dev->st_level)) {
+ wireless_dev->st_level = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_level,
+ NULL,
+ wireless_dev->name,
+ "wireless.signal_level",
+ "The signal level is the wireless signal power level received by the wireless client. The closer the value is to 0, the stronger the signal.",
+ "dBm",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 2,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_level, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_level = rrddim_add(wireless_dev->st_level, "signal_level", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_level);
+ }
+
+ if (unlikely(!wireless_dev->st_noise)) {
+ wireless_dev->st_noise = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_noise,
+ NULL,
+ wireless_dev->name,
+ "wireless.noise_level",
+ "The noise level indicates the amount of background noise in your environment. The closer the value to 0, the greater the noise level.",
+ "dBm",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 3,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_noise, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_noise = rrddim_add(wireless_dev->st_noise, "noise_level", NULL, 1, 1,
+ RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(wireless_dev->st_noise);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_link, wireless_dev->rd_link,
+ (collected_number)wireless_dev->link);
+ rrdset_done(wireless_dev->st_link);
+
+ rrddim_set_by_pointer(wireless_dev->st_level, wireless_dev->rd_level,
+ (collected_number)wireless_dev->level);
+ rrdset_done(wireless_dev->st_level);
+
+ rrddim_set_by_pointer(wireless_dev->st_noise, wireless_dev->rd_noise,
+ (collected_number)wireless_dev->noise);
+ rrdset_done(wireless_dev->st_noise);
+ }
+
+ if (likely(do_discarded_packets)) {
+ wireless_dev->nwid = str2kernel_uint_t(procfile_lineword(ff, l, 5));
+ wireless_dev->crypt = str2kernel_uint_t(procfile_lineword(ff, l, 6));
+ wireless_dev->frag = str2kernel_uint_t(procfile_lineword(ff, l, 7));
+ wireless_dev->retry = str2kernel_uint_t(procfile_lineword(ff, l, 8));
+ wireless_dev->misc = str2kernel_uint_t(procfile_lineword(ff, l, 9));
+
+ if (unlikely(!wireless_dev->st_discarded_packets)) {
+ wireless_dev->st_discarded_packets = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_discarded_packets,
+ NULL,
+ wireless_dev->name,
+ "wireless.discarded_packets",
+ "Packet discarded in the wireless adapter due to \"wireless\" specific problems.",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 4,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(wireless_dev->st_discarded_packets, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_nwid = rrddim_add(wireless_dev->st_discarded_packets, "nwid", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_crypt = rrddim_add(wireless_dev->st_discarded_packets, "crypt", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_frag = rrddim_add(wireless_dev->st_discarded_packets, "frag", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_retry = rrddim_add(wireless_dev->st_discarded_packets, "retry", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ wireless_dev->rd_misc = rrddim_add(wireless_dev->st_discarded_packets, "misc", NULL, 1,
+ 1, RRD_ALGORITHM_INCREMENTAL);
+ } else {
+ rrdset_next(wireless_dev->st_discarded_packets);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_nwid,
+ (collected_number)wireless_dev->nwid);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_crypt,
+ (collected_number)wireless_dev->crypt);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_frag,
+ (collected_number)wireless_dev->frag);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_retry,
+ (collected_number)wireless_dev->retry);
+
+ rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_misc,
+ (collected_number)wireless_dev->misc);
+
+ rrdset_done(wireless_dev->st_discarded_packets);
+ }
+
+ if (likely(do_beacon)) {
+ wireless_dev->missed_beacon = str2kernel_uint_t(procfile_lineword(ff, l, 10));
+
+ if (unlikely(!wireless_dev->st_missed_beacon)) {
+ wireless_dev->st_missed_beacon = rrdset_create_localhost("wireless",
+ wireless_dev->chart_id_net_missed_beacon,
+ NULL,
+ wireless_dev->name,
+ "wireless.missed_beacons",
+ "Number of missed beacons.",
+ "frames/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
+ NETDATA_CHART_PRIO_WIRELESS_IFACE + 5,
+ update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_flag_set(wireless_dev->st_missed_beacon, RRDSET_FLAG_DETAIL);
+
+ wireless_dev->rd_missed_beacon = rrddim_add(wireless_dev->st_missed_beacon, "missed_beacons",
+ NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ } else {
+ rrdset_next(wireless_dev->st_missed_beacon);
+ }
+
+ rrddim_set_by_pointer(wireless_dev->st_missed_beacon, wireless_dev->rd_missed_beacon,
+ (collected_number)wireless_dev->missed_beacon);
+ rrdset_done(wireless_dev->st_missed_beacon);
+ }
+
+ wireless_dev->updated = timestamp;
+ }
+
+ netwireless_cleanup(&timestamp);
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_pressure.c b/collectors/proc.plugin/proc_pressure.c
new file mode 100644
index 000000000..4a40b4aaf
--- /dev/null
+++ b/collectors/proc.plugin/proc_pressure.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_PRESSURE_NAME "/proc/pressure"
+#define CONFIG_SECTION_PLUGIN_PROC_PRESSURE "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_PRESSURE_NAME
+
+// Linux calculates this every 2 seconds, see kernel/sched/psi.c PSI_FREQ
+#define MIN_PRESSURE_UPDATE_EVERY 2
+
+
+static struct pressure resources[PRESSURE_NUM_RESOURCES] = {
+ {
+ .some = { .id = "cpu_pressure", .title = "CPU Pressure" },
+ },
+ {
+ .some = { .id = "memory_some_pressure", .title = "Memory Pressure" },
+ .full = { .id = "memory_full_pressure", .title = "Memory Full Pressure" },
+ },
+ {
+ .some = { .id = "io_some_pressure", .title = "I/O Pressure" },
+ .full = { .id = "io_full_pressure", .title = "I/O Full Pressure" },
+ },
+};
+
+static struct {
+ procfile *pf;
+ const char *name; // metric file name
+ const char *family; // webui section name
+ int section_priority;
+} resource_info[PRESSURE_NUM_RESOURCES] = {
+ { .name = "cpu", .family = "cpu", .section_priority = NETDATA_CHART_PRIO_SYSTEM_CPU },
+ { .name = "memory", .family = "ram", .section_priority = NETDATA_CHART_PRIO_SYSTEM_RAM },
+ { .name = "io", .family = "disk", .section_priority = NETDATA_CHART_PRIO_SYSTEM_IO },
+};
+
+void update_pressure_chart(struct pressure_chart *chart) {
+ rrddim_set_by_pointer(chart->st, chart->rd10, (collected_number)(chart->value10 * 100));
+ rrddim_set_by_pointer(chart->st, chart->rd60, (collected_number)(chart->value60 * 100));
+ rrddim_set_by_pointer(chart->st, chart->rd300, (collected_number)(chart->value300 * 100));
+
+ rrdset_done(chart->st);
+}
+
+int do_proc_pressure(int update_every, usec_t dt) {
+ int fail_count = 0;
+ int i;
+
+ static usec_t next_pressure_dt = 0;
+ static char *base_path = NULL;
+
+ update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every;
+
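+ // run the collection only once every update_every seconds; on skipped passes dt is subtracted from the countdown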
+ if (next_pressure_dt <= dt) {
+ next_pressure_dt = update_every * USEC_PER_SEC;
+ } else {
+ next_pressure_dt -= dt;
+ return 0;
+ }
+
+ if (unlikely(!base_path)) {
+ base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure");
+ }
+
+ for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) {
+ procfile *ff = resource_info[i].pf;
+ int do_some = resources[i].some.enabled, do_full = resources[i].full.enabled;
+
+ if (unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ char config_key[CONFIG_MAX_NAME + 1];
+
+ snprintfz(filename
+ , FILENAME_MAX
+ , "%s%s/%s"
+ , netdata_configured_host_prefix
+ , base_path
+ , resource_info[i].name);
+
+ snprintfz(config_key, CONFIG_MAX_NAME, "enable %s some pressure", resource_info[i].name);
+ do_some = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, CONFIG_BOOLEAN_YES);
+ resources[i].some.enabled = do_some;
+ if (resources[i].full.id) {
+ snprintfz(config_key, CONFIG_MAX_NAME, "enable %s full pressure", resource_info[i].name);
+ do_full = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, CONFIG_BOOLEAN_YES);
+ resources[i].full.enabled = do_full;
+ }
+
+ ff = procfile_open(filename, " =", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) {
+ error("Cannot read pressure information from %s.", filename);
+ fail_count++;
+ continue;
+ }
+ }
+
+ ff = procfile_readall(ff);
+ resource_info[i].pf = ff;
+ if (unlikely(!ff)) {
+ fail_count++;
+ continue;
+ }
+
+ size_t lines = procfile_lines(ff);
+ if (unlikely(lines < 1)) {
+ error("%s has no lines.", procfile_filename(ff));
+ fail_count++;
+ continue;
+ }
+
+ struct pressure_chart *chart;
+ if (do_some) {
+ chart = &resources[i].some;
+ if (unlikely(!chart->st)) {
+ chart->st = rrdset_create_localhost(
+ "system"
+ , chart->id
+ , NULL
+ , resource_info[i].family
+ , NULL
+ , chart->title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PRESSURE_NAME
+ , resource_info[i].section_priority + 40
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ chart->rd10 = rrddim_add(chart->st, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd60 = rrddim_add(chart->st, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd300 = rrddim_add(chart->st, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(chart->st);
+ }
+
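+ // the "some" line looks like: some avg10=0.45 avg60=0.32 avg300=0.11 total=123456
+ // with the " =" separators, the avg10/avg60/avg300 values are words 2, 4 and 6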
+ chart->value10 = strtod(procfile_lineword(ff, 0, 2), NULL);
+ chart->value60 = strtod(procfile_lineword(ff, 0, 4), NULL);
+ chart->value300 = strtod(procfile_lineword(ff, 0, 6), NULL);
+ update_pressure_chart(chart);
+ }
+
+ if (do_full && lines > 2) {
+ chart = &resources[i].full;
+ if (unlikely(!chart->st)) {
+ chart->st = rrdset_create_localhost(
+ "system"
+ , chart->id
+ , NULL
+ , resource_info[i].family
+ , NULL
+ , chart->title
+ , "percentage"
+ , PLUGIN_PROC_NAME
+ , PLUGIN_PROC_MODULE_PRESSURE_NAME
+ , resource_info[i].section_priority + 45
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ chart->rd10 = rrddim_add(chart->st, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd60 = rrddim_add(chart->st, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ chart->rd300 = rrddim_add(chart->st, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ } else {
+ rrdset_next(chart->st);
+ }
+
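+ // the "full" metrics, when present, are on the second line (index 1) of the pressure file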
+ chart->value10 = strtod(procfile_lineword(ff, 1, 2), NULL);
+ chart->value60 = strtod(procfile_lineword(ff, 1, 4), NULL);
+ chart->value300 = strtod(procfile_lineword(ff, 1, 6), NULL);
+ update_pressure_chart(chart);
+ }
+ }
+
+ if (PRESSURE_NUM_RESOURCES == fail_count) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/collectors/proc.plugin/proc_pressure.h b/collectors/proc.plugin/proc_pressure.h
new file mode 100644
index 000000000..333021866
--- /dev/null
+++ b/collectors/proc.plugin/proc_pressure.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROC_PRESSURE_H
+#define NETDATA_PROC_PRESSURE_H
+
+#define PRESSURE_NUM_RESOURCES 3
+
+struct pressure {
+ int updated;
+ char *filename;
+
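+ // "some": time at least one task stalled on the resource; "full": time all non-idle tasks stalled at once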
+ struct pressure_chart {
+ int enabled;
+
+ const char *id;
+ const char *title;
+
+ double value10;
+ double value60;
+ double value300;
+
+ RRDSET *st;
+ RRDDIM *rd10;
+ RRDDIM *rd60;
+ RRDDIM *rd300;
+ } some, full;
+};
+
+extern void update_pressure_chart(struct pressure_chart *chart);
+
+#endif //NETDATA_PROC_PRESSURE_H
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
index 5e6b79fc8..373a06770 100644
--- a/collectors/proc.plugin/proc_stat.c
+++ b/collectors/proc.plugin/proc_stat.c
@@ -1003,6 +1003,7 @@ int do_proc_stat(int update_every, usec_t dt) {
else
error("Cannot read current process affinity");
+ // These threads are very ephemeral and don't need to have a specific name
if(unlikely(pthread_create(&thread, NULL, wake_cpu_thread, (void *)&core)))
error("Cannot create wake_cpu_thread");
else if(unlikely(pthread_join(thread, NULL)))
diff --git a/collectors/proc.plugin/sys_class_infiniband.c b/collectors/proc.plugin/sys_class_infiniband.c
new file mode 100644
index 000000000..46f40f2c0
--- /dev/null
+++ b/collectors/proc.plugin/sys_class_infiniband.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// Heavily inspired from proc_net_dev.c
+#include "plugin_proc.h"
+
+#define PLUGIN_PROC_MODULE_INFINIBAND_NAME "/sys/class/infiniband"
+#define CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND \
+ "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INFINIBAND_NAME
+
+// ib_device::name[IB_DEVICE_NAME_MAX(64)] + "-" + ib_device::phys_port_cnt[u8 = 3 chars]
+#define IBNAME_MAX 68
+
+// ----------------------------------------------------------------------------
+// infiniband & omnipath standard counters
+
+// A macro is used here because there is no single summary file, only many separate per-counter files, so helpers like
+// procfile() cannot be used. Also, omnipath generates additional counters that are not provided by infiniband
+#define FOREACH_COUNTER(GEN, ...) \
+ FOREACH_COUNTER_BYTES(GEN, __VA_ARGS__) \
+ FOREACH_COUNTER_PACKETS(GEN, __VA_ARGS__) \
+ FOREACH_COUNTER_ERRORS(GEN, __VA_ARGS__)
+
+#define FOREACH_COUNTER_BYTES(GEN, ...) \
+ GEN(port_rcv_data, bytes, "Received", 1, __VA_ARGS__) \
+ GEN(port_xmit_data, bytes, "Sent", -1, __VA_ARGS__)
+
+#define FOREACH_COUNTER_PACKETS(GEN, ...) \
+ GEN(port_rcv_packets, packets, "Received", 1, __VA_ARGS__) \
+ GEN(port_xmit_packets, packets, "Sent", -1, __VA_ARGS__) \
+ GEN(multicast_rcv_packets, packets, "Mcast rcvd", 1, __VA_ARGS__) \
+ GEN(multicast_xmit_packets, packets, "Mcast sent", -1, __VA_ARGS__) \
+ GEN(unicast_rcv_packets, packets, "Ucast rcvd", 1, __VA_ARGS__) \
+ GEN(unicast_xmit_packets, packets, "Ucast sent", -1, __VA_ARGS__)
+
+#define FOREACH_COUNTER_ERRORS(GEN, ...) \
+ GEN(port_rcv_errors, errors, "Pkts malformated", 1, __VA_ARGS__) \
+ GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded ", 1, __VA_ARGS__) \
+ GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \
+ GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \
+ GEN(VL15_dropped, errors, "Pkts missed ressource", 1, __VA_ARGS__) \
+ GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \
+ GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \
+ GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \
+ GEN(local_link_integrity_errors, errors, "Link integrity err", 1, __VA_ARGS__) \
+ GEN(symbol_error, errors, "Link minor errors", 1, __VA_ARGS__) \
+ GEN(port_rcv_remote_physical_errors, errors, "Pkts rcvd with EBP", 1, __VA_ARGS__) \
+ GEN(port_rcv_switch_relay_errors, errors, "Pkts rcvd discarded by switch", 1, __VA_ARGS__) \
+ GEN(port_xmit_constraint_errors, errors, "Pkts sent discarded by switch", 1, __VA_ARGS__)
+
+//
+// Hardware Counters
+//
+
+// IMPORTANT: These are vendor-specific fields.
+// If you want to add a new vendor, search this file for the 'VENDORS:' keyword and
+// add your definition as 'VENDOR-<key>:' where <key> is the string part that
+// is shown in /sys/class/infiniband/<key>X_Y
+// EG: for Mellanox, shown as mlx0_1, it's 'mlx'
+// for Intel, shown as hfi1_1, it's 'hfi'
+
+// VENDORS: List of implemented hardware vendors
+#define FOREACH_HWCOUNTER_NAME(GEN, ...) GEN(mlx, __VA_ARGS__)
+
+// VENDOR-MLX: HW Counters for Mellanox ConnectX Devices
+#define FOREACH_HWCOUNTER_MLX(GEN, ...) \
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN, __VA_ARGS__) \
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN, __VA_ARGS__)
+
+#define FOREACH_HWCOUNTER_MLX_PACKETS(GEN, ...) \
+ GEN(np_cnp_sent, hwpackets, "RoCEv2 Congestion sent", 1, __VA_ARGS__) \
+ GEN(np_ecn_marked_roce_packets, hwpackets, "RoCEv2 Congestion rcvd", -1, __VA_ARGS__) \
+ GEN(rp_cnp_handled, hwpackets, "IB Congestion handled", 1, __VA_ARGS__) \
+ GEN(rx_atomic_requests, hwpackets, "ATOMIC req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_dct_connect, hwpackets, "Connection req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_read_requests, hwpackets, "Read req. rcvd", 1, __VA_ARGS__) \
+ GEN(rx_write_requests, hwpackets, "Write req. rcvd", 1, __VA_ARGS__) \
+ GEN(roce_adp_retrans, hwpackets, "RoCE retrans adaptive", 1, __VA_ARGS__) \
+ GEN(roce_adp_retrans_to, hwpackets, "RoCE retrans timeout", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart, hwpackets, "RoCE slow restart", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart_cnps, hwpackets, "RoCE slow restart congestion", 1, __VA_ARGS__) \
+ GEN(roce_slow_restart_trans, hwpackets, "RoCE slow restart count", 1, __VA_ARGS__)
+
+#define FOREACH_HWCOUNTER_MLX_ERRORS(GEN, ...) \
+ GEN(duplicate_request, hwerrors, "Duplicated packets", -1, __VA_ARGS__) \
+ GEN(implied_nak_seq_err, hwerrors, "Pkt Seq Num gap", 1, __VA_ARGS__) \
+ GEN(local_ack_timeout_err, hwerrors, "Ack timer expired", 1, __VA_ARGS__) \
+ GEN(out_of_buffer, hwerrors, "Drop missing buffer", 1, __VA_ARGS__) \
+ GEN(out_of_sequence, hwerrors, "Drop out of sequence", 1, __VA_ARGS__) \
+ GEN(packet_seq_err, hwerrors, "NAK sequence rcvd", 1, __VA_ARGS__) \
+ GEN(req_cqe_error, hwerrors, "CQE err Req", 1, __VA_ARGS__) \
+ GEN(resp_cqe_error, hwerrors, "CQE err Resp", 1, __VA_ARGS__) \
+ GEN(req_cqe_flush_error, hwerrors, "CQE Flushed err Req", 1, __VA_ARGS__) \
+ GEN(resp_cqe_flush_error, hwerrors, "CQE Flushed err Resp", 1, __VA_ARGS__) \
+ GEN(req_remote_access_errors, hwerrors, "Remote access err Req", 1, __VA_ARGS__) \
+ GEN(resp_remote_access_errors, hwerrors, "Remote access err Resp", 1, __VA_ARGS__) \
+ GEN(req_remote_invalid_request, hwerrors, "Remote invalid req", 1, __VA_ARGS__) \
+ GEN(resp_local_length_error, hwerrors, "Local length err Resp", 1, __VA_ARGS__) \
+ GEN(rnr_nak_retry_err, hwerrors, "RNR NAK Packets", 1, __VA_ARGS__) \
+ GEN(rp_cnp_ignored, hwerrors, "CNP Pkts ignored", 1, __VA_ARGS__) \
+ GEN(rx_icrc_encapsulated, hwerrors, "RoCE ICRC Errors", 1, __VA_ARGS__)
+
+// Common definitions used more than once
+#define GEN_RRD_DIM_ADD(NAME, GRP, DESC, DIR, PORT) \
+ GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, 1, 1, RRD_ALGORITHM_INCREMENTAL)
+
+#define GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, MULT, DIV, ALGO) \
+ PORT->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR * MULT, DIV, ALGO);
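+
+// e.g. GEN_RRD_DIM_ADD(port_rcv_data, bytes, "Received", 1, port) expands to:
+// port->rd_port_rcv_data = rrddim_add(port->st_bytes, "Received", NULL, 1 * 1, 1, RRD_ALGORITHM_INCREMENTAL);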
+
+#define GEN_RRD_DIM_ADD_HW(NAME, GRP, DESC, DIR, PORT, HW) \
+ HW->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR, 1, RRD_ALGORITHM_INCREMENTAL);
+
+#define GEN_RRD_DIM_SETP(NAME, GRP, DESC, DIR, PORT) \
+ rrddim_set_by_pointer(PORT->st_##GRP, PORT->rd_##NAME, (collected_number)PORT->NAME);
+
+#define GEN_RRD_DIM_SETP_HW(NAME, GRP, DESC, DIR, PORT, HW) \
+ rrddim_set_by_pointer(PORT->st_##GRP, HW->rd_##NAME, (collected_number)HW->NAME);
+
+// https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters
+// https://community.mellanox.com/s/article/infiniband-port-counters
+static struct ibport {
+ char *name;
+ char *counters_path;
+ char *hwcounters_path;
+ int len;
+
+ // flags
+ int configured;
+ int enabled;
+ int discovered;
+
+ int do_bytes;
+ int do_packets;
+ int do_errors;
+ int do_hwpackets;
+ int do_hwerrors;
+
+ const char *chart_type_bytes;
+ const char *chart_type_packets;
+ const char *chart_type_errors;
+ const char *chart_type_hwpackets;
+ const char *chart_type_hwerrors;
+
+ const char *chart_id_bytes;
+ const char *chart_id_packets;
+ const char *chart_id_errors;
+ const char *chart_id_hwpackets;
+ const char *chart_id_hwerrors;
+
+ const char *chart_family;
+
+ unsigned long priority;
+
+ // Port details using drivers/infiniband/core/sysfs.c :: rate_show()
+ RRDDIM *rd_speed;
+ uint64_t speed;
+ uint64_t width;
+
+// Stats from /$device/ports/$portid/counters
+// as defined in drivers/infiniband/hw/qib/qib_verbs.h
+// All are uint64 except vl15_dropped, local_link_integrity_errors and excessive_buffer_overrun_errors, which are uint32
+// Will generate 3 elements for each counter:
+// - uint64_t to store the value
+// - char* to store the filename path
+// - RRDDIM* to store the RRD Dimension
+#define GEN_DEF_COUNTER(NAME, ...) \
+ uint64_t NAME; \
+ char *file_##NAME; \
+ RRDDIM *rd_##NAME;
+ FOREACH_COUNTER(GEN_DEF_COUNTER)
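+ // e.g. for port_rcv_data this declares: uint64_t port_rcv_data; char *file_port_rcv_data; RRDDIM *rd_port_rcv_data;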
+
+// Vendor specific hwcounters from /$device/ports/$portid/hw_counters
+// We will generate one struct pointer per vendor to avoid future casting
+#define GEN_DEF_HWCOUNTER_PTR(VENDOR, ...) struct ibporthw_##VENDOR *hwcounters_##VENDOR;
+ FOREACH_HWCOUNTER_NAME(GEN_DEF_HWCOUNTER_PTR)
+
+ // Function pointer to the "infiniband_hwcounters_parse_<vendor>" function
+ void (*hwcounters_parse)(struct ibport *);
+ void (*hwcounters_dorrd)(struct ibport *);
+
+ // charts and dim
+ RRDSET *st_bytes;
+ RRDSET *st_packets;
+ RRDSET *st_errors;
+ RRDSET *st_hwpackets;
+ RRDSET *st_hwerrors;
+
+ RRDSETVAR *stv_speed;
+
+ usec_t speed_last_collected_usec;
+
+ struct ibport *next;
+} *ibport_root = NULL, *ibport_last_used = NULL;
+
+// VENDORS: reading / calculation functions
+#define GEN_DEF_HWCOUNTER(NAME, ...) \
+ uint64_t NAME; \
+ char *file_##NAME; \
+ RRDDIM *rd_##NAME;
+
+#define GEN_DO_HWCOUNTER_READ(NAME, GRP, DESC, DIR, PORT, HW, ...) \
+ if (HW->file_##NAME) { \
+ if (read_single_number_file(HW->file_##NAME, (unsigned long long *)&HW->NAME)) { \
+ error("cannot read iface '%s' hwcounter '" #HW "'", PORT->name); \
+ HW->file_##NAME = NULL; \
+ } \
+ }
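+
+// on a read failure the file path is set to NULL, so the counter is silently skipped on later passes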
+
+// VENDOR-MLX: Mellanox
+struct ibporthw_mlx {
+ FOREACH_HWCOUNTER_MLX(GEN_DEF_HWCOUNTER)
+};
+void infiniband_hwcounters_parse_mlx(struct ibport *port)
+{
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO)
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO)
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
+}
+void infiniband_hwcounters_dorrd_mlx(struct ibport *port)
+{
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
+ rrdset_done(port->st_hwerrors);
+ }
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
+ rrdset_done(port->st_hwpackets);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+static struct ibport *get_ibport(const char *dev, const char *port)
+{
+ struct ibport *p;
+
+ char name[IBNAME_MAX + 1];
+ snprintfz(name, IBNAME_MAX, "%s-%s", dev, port);
+
+ // search it, resuming from the last position in sequence
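+ // ports are normally enumerated in the same order on every pass, so resuming where the
+ // previous lookup stopped makes the search O(1) in the common case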
+ for (p = ibport_last_used; p; p = p->next) {
+ if (unlikely(!strcmp(name, p->name))) {
+ ibport_last_used = p->next;
+ return p;
+ }
+ }
+
+ // new round, from the beginning to the last position used this time
+ for (p = ibport_root; p != ibport_last_used; p = p->next) {
+ if (unlikely(!strcmp(name, p->name))) {
+ ibport_last_used = p->next;
+ return p;
+ }
+ }
+
+ // create a new one
+ p = callocz(1, sizeof(struct ibport));
+ p->name = strdupz(name);
+ p->len = strlen(p->name);
+
+ p->chart_type_bytes = strdupz("infiniband_cnt_bytes");
+ p->chart_type_packets = strdupz("infiniband_cnt_packets");
+ p->chart_type_errors = strdupz("infiniband_cnt_errors");
+ p->chart_type_hwpackets = strdupz("infiniband_hwc_packets");
+ p->chart_type_hwerrors = strdupz("infiniband_hwc_errors");
+
+ char buffer[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntbytes_%s", p->name);
+ p->chart_id_bytes = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntpackets_%s", p->name);
+ p->chart_id_packets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cnterrors_%s", p->name);
+ p->chart_id_errors = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcntpackets_%s", p->name);
+ p->chart_id_hwpackets = strdupz(buffer);
+
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcnterrors_%s", p->name);
+ p->chart_id_hwerrors = strdupz(buffer);
+
+ p->chart_family = strdupz(p->name);
+ p->priority = NETDATA_CHART_PRIO_INFINIBAND;
+
+ // Link current ibport to last one in the list
+ if (ibport_root) {
+ struct ibport *t;
+ for (t = ibport_root; t->next; t = t->next)
+ ;
+ t->next = p;
+ } else
+ ibport_root = p;
+
+ return p;
+}
+
+int do_sys_class_infiniband(int update_every, usec_t dt)
+{
+ (void)dt;
+ static SIMPLE_PATTERN *disabled_list = NULL;
+ static int initialized = 0;
+ static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES;
+ static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1;
+ static char *sys_class_infiniband_dirname = NULL;
+
+ static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0;
+
+ if (unlikely(enable_new_ports == -1)) {
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/infiniband");
+ sys_class_infiniband_dirname =
+ config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "dirname to monitor", dirname);
+
+ do_bytes = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "bandwidth counters", CONFIG_BOOLEAN_YES);
+ do_packets = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "packets counters", CONFIG_BOOLEAN_YES);
+ do_errors = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "errors counters", CONFIG_BOOLEAN_YES);
+ do_hwpackets = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware packets counters", CONFIG_BOOLEAN_AUTO);
+ do_hwerrors = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware errors counters", CONFIG_BOOLEAN_AUTO);
+
+ enable_only_active = config_get_boolean_ondemand(
+ CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "monitor only active ports", CONFIG_BOOLEAN_AUTO);
+ disabled_list = simple_pattern_create(
+ config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "disable by default interfaces matching", ""), NULL,
+ SIMPLE_PATTERN_EXACT);
+
+ dt_to_refresh_ports =
+ config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) *
+ USEC_PER_SEC;
+ if (dt_to_refresh_ports < 0)
+ dt_to_refresh_ports = 0;
+ }
+
+ // init listing of /sys/class/infiniband/ (or rediscovery)
+ if (unlikely(!initialized) || unlikely(last_refresh_ports_usec >= dt_to_refresh_ports)) {
+ // If the folder does not exist, return 1 to disable the module
+ DIR *devices_dir = opendir(sys_class_infiniband_dirname);
+ if (unlikely(!devices_dir))
+ return 1;
+
+ // Iterate over all available devices
+ struct dirent *dev_dent;
+ while ((dev_dent = readdir(devices_dir))) {
+ // Skip special folders
+ if (!strcmp(dev_dent->d_name, "..") || !strcmp(dev_dent->d_name, "."))
+ continue;
+
+ // /sys/class/infiniband/<dev>/ports
+ char ports_dirname[FILENAME_MAX + 1];
+ snprintfz(ports_dirname, FILENAME_MAX, "%s/%s/%s", sys_class_infiniband_dirname, dev_dent->d_name, "ports");
+
+ DIR *ports_dir = opendir(ports_dirname);
+ if (unlikely(!ports_dir))
+ continue;
+
+ struct dirent *port_dent;
+ while ((port_dent = readdir(ports_dir))) {
+ // Skip special folders
+ if (!strcmp(port_dent->d_name, "..") || !strcmp(port_dent->d_name, "."))
+ continue;
+
+ char buffer[FILENAME_MAX + 1];
+
+ // Check if counters are available (mandatory)
+ // /sys/class/infiniband/<device>/ports/<port>/counters
+ char counters_dirname[FILENAME_MAX + 1];
+ snprintfz(counters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "counters");
+ DIR *counters_dir = opendir(counters_dirname);
+ // Standard counters are mandatory
+ if (!counters_dir)
+ continue;
+ closedir(counters_dir);
+
+ // Hardware counters are optional, used later
+ char hwcounters_dirname[FILENAME_MAX + 1];
+ snprintfz(
+ hwcounters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "hw_counters");
+
+ // Get new or existing ibport
+ struct ibport *p = get_ibport(dev_dent->d_name, port_dent->d_name);
+ if (!p)
+ continue;
+
+ // Prepare configuration
+ if (!p->configured) {
+ p->configured = 1;
+
+ // Enable by default, will be filtered out later
+ p->enabled = 1;
+
+ p->counters_path = strdupz(counters_dirname);
+ p->hwcounters_path = strdupz(hwcounters_dirname);
+
+ snprintfz(buffer, FILENAME_MAX, "plugin:proc:/sys/class/infiniband:%s", p->name);
+
+ // Standard counters
+ p->do_bytes = config_get_boolean_ondemand(buffer, "bytes", do_bytes);
+ p->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
+ p->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
+
+// Generate the filename for each counter: allocate and concatenate <counters_path>/<counter name>
+#define GEN_DO_COUNTER_NAME(NAME, GRP, DESC, DIR, PORT, ...) \
+ PORT->file_##NAME = callocz(1, strlen(PORT->counters_path) + sizeof(#NAME) + 3); \
+ strcat(PORT->file_##NAME, PORT->counters_path); \
+ strcat(PORT->file_##NAME, "/" #NAME);
+ FOREACH_COUNTER(GEN_DO_COUNTER_NAME, p)
+
+ // Check HW Counters vendor dependent
+ DIR *hwcounters_dir = opendir(hwcounters_dirname);
+ if (hwcounters_dir) {
+ // Default to the module-level settings
+ p->do_hwpackets = config_get_boolean_ondemand(buffer, "hwpackets", do_hwpackets);
+ p->do_hwerrors = config_get_boolean_ondemand(buffer, "hwerrors", do_hwerrors);
+
+// VENDORS: Set your own
+
+// Allocate and build the filename strings
+#define GEN_DO_HWCOUNTER_NAME(NAME, GRP, DESC, DIR, PORT, HW, ...) \
+ HW->file_##NAME = callocz(1, strlen(PORT->hwcounters_path) + sizeof(#NAME) + 3); \
+ strcat(HW->file_##NAME, PORT->hwcounters_path); \
+ strcat(HW->file_##NAME, "/" #NAME);
+
+ // VENDOR-MLX: Mellanox
+ if (strncmp(dev_dent->d_name, "mlx", 3) == 0) {
+ // Allocate the vendor specific struct
+ p->hwcounters_mlx = callocz(1, sizeof(struct ibporthw_mlx));
+
+ FOREACH_HWCOUNTER_MLX(GEN_DO_HWCOUNTER_NAME, p, p->hwcounters_mlx)
+
+ // Set the function pointer for hwcounter parsing
+ p->hwcounters_parse = &infiniband_hwcounters_parse_mlx;
+ p->hwcounters_dorrd = &infiniband_hwcounters_dorrd_mlx;
+ }
+
+ // VENDOR: Unknown
+ else {
+ p->do_hwpackets = CONFIG_BOOLEAN_NO;
+ p->do_hwerrors = CONFIG_BOOLEAN_NO;
+ }
+ closedir(hwcounters_dir);
+ }
+ }
+
+ // Check the port state to decide whether the port stays enabled
+ if (enable_only_active) {
+ snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "state");
+ unsigned long long active;
+ // File is "1: DOWN" or "4: ACTIVE", but str2ull will stop on first non-decimal char
+ read_single_number_file(buffer, &active);
+
+ // Want "IB_PORT_ACTIVE" == "4", as defined by drivers/infiniband/core/sysfs.c::state_show()
+ if (active == 4)
+ p->enabled = 1;
+ else
+ p->enabled = 0;
+ }
+
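+ // Apply the user's disable pattern on top of the state check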
+ if (p->enabled)
+ p->enabled = !simple_pattern_matches(disabled_list, p->name);
+
+ // Check / update the link speed & width from the "rate" file
+ // Sample output: "100 Gb/sec (4X EDR)"
+ snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "rate");
+ char buffer_rate[65];
+ if (read_file(buffer, buffer_rate, 64)) {
+ error("Unable to read '%s'", buffer);
+ p->width = 1;
+ } else {
+ // str2ull will stop on the first non-decimal char
+ p->speed = str2ull(buffer_rate);
+
+ // The width is inside the parentheses; guard against a missing "("
+ char *buffer_width = strstr(buffer_rate, "(");
+ if (buffer_width) {
+ buffer_width++;
+ p->width = str2ull(buffer_width);
+ } else
+ p->width = 1;
+ }
+
+ if (!p->discovered)
+ info(
+ "Infiniband card %s port %s at speed %lu width %lu", dev_dent->d_name, port_dent->d_name,
+ p->speed, p->width);
+
+ p->discovered = 1;
+ }
+ closedir(ports_dir);
+ }
+ closedir(devices_dir);
+
+ initialized = 1;
+ last_refresh_ports_usec = 0;
+ }
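+ // Accumulate elapsed time; the listing above is redone once it reaches dt_to_refresh_ports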
+ last_refresh_ports_usec += dt;
+
+ // Update the values of all enabled ports
+ struct ibport *port;
+ for (port = ibport_root; port; port = port->next) {
+ if (!port->enabled)
+ continue;
+ //
+ // Read values from system to struct
+ //
+
+// Read each counter from its sysfs file into the ibport struct;
+// on read failure the file pointer is cleared so the counter is skipped afterwards
+#define GEN_DO_COUNTER_READ(NAME, GRP, DESC, DIR, PORT, ...) \
+ if (PORT->file_##NAME) { \
+ if (read_single_number_file(PORT->file_##NAME, (unsigned long long *)&PORT->NAME)) { \
+ error("cannot read iface '%s' counter '" #NAME "'", PORT->name); \
+ PORT->file_##NAME = NULL; \
+ } \
+ }
+
+ // Update charts
+ if (port->do_bytes != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_BYTES(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_bytes)) {
+ port->st_bytes = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_bytes,
+ NULL,
+ port->chart_family,
+ "ib.bytes",
+ "Bandwidth usage",
+ "kilobits/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 1,
+ update_every,
+ RRDSET_TYPE_AREA);
+ // Create Dimensions
+ rrdset_flag_set(port->st_bytes, RRDSET_FLAG_DETAIL);
+ // The chart is in kilobits/s so the dashboard can autoscale it.
+ // Counters are reported per lane, so multiply by the link width;
+ // the 8x factor converts bytes to bits and the 1024 divisor yields kilobits
+ FOREACH_COUNTER_BYTES(GEN_RRD_DIM_ADD_CUSTOM, port, 8 * port->width, 1024, RRD_ALGORITHM_INCREMENTAL)
+
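+ // Expose the link speed as a custom chart variable (e.g. for use in health templates)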
+ port->stv_speed = rrdsetvar_custom_chart_variable_create(port->st_bytes, "link_speed");
+ } else
+ rrdset_next(port->st_bytes);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_BYTES(GEN_RRD_DIM_SETP, port)
+
+ // For the link speed, only set the chart variable (it has no dimension)
+ rrdsetvar_custom_chart_variable_set(port->stv_speed, port->speed);
+
+ rrdset_done(port->st_bytes);
+ }
+
+ if (port->do_packets != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_PACKETS(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_packets)) {
+ port->st_packets = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_packets,
+ NULL,
+ port->chart_family,
+ "ib.packets",
+ "Packets Statistics",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 2,
+ update_every,
+ RRDSET_TYPE_AREA);
+ // Create Dimensions
+ rrdset_flag_set(port->st_packets, RRDSET_FLAG_DETAIL);
+ FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_ADD, port)
+ } else
+ rrdset_next(port->st_packets);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_SETP, port)
+ rrdset_done(port->st_packets);
+ }
+
+ if (port->do_errors != CONFIG_BOOLEAN_NO) {
+ // Read values from sysfs
+ FOREACH_COUNTER_ERRORS(GEN_DO_COUNTER_READ, port)
+
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_errors)) {
+ port->st_errors = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_errors,
+ NULL,
+ port->chart_family,
+ "ib.errors",
+ "Error Counters",
+ "errors/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 3,
+ update_every,
+ RRDSET_TYPE_LINE);
+ // Create Dimensions
+ rrdset_flag_set(port->st_errors, RRDSET_FLAG_DETAIL);
+ FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_ADD, port)
+ } else
+ rrdset_next(port->st_errors);
+
+ // Link read values to dimensions
+ FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_SETP, port)
+ rrdset_done(port->st_errors);
+ }
+
+ //
+ // HW Counters
+ //
+
+ // Call the function for parsing and setting hwcounters
+ if (port->hwcounters_parse && port->hwcounters_dorrd) {
+ // Read all values (done by vendor-specific function)
+ (*port->hwcounters_parse)(port);
+
+ if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_hwerrors)) {
+ port->st_hwerrors = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_hwerrors,
+ NULL,
+ port->chart_family,
+ "ib.hwerrors",
+ "Hardware Errors",
+ "errors/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 4,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(port->st_hwerrors, RRDSET_FLAG_DETAIL);
+
+ // VENDORS: Set your selection
+
+ // VENDOR: Mellanox
+ if (strncmp(port->name, "mlx", 3) == 0) {
+ FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
+ }
+
+ // Unknown vendor, should not happen
+ else {
+ error(
+ "Unmanaged vendor for '%s', do_hwerrors should have been set to no. Please report this bug",
+ port->name);
+ port->do_hwerrors = CONFIG_BOOLEAN_NO;
+ }
+ } else
+ rrdset_next(port->st_hwerrors);
+ }
+
+ if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
+ // First creation of RRD Set (charts)
+ if (unlikely(!port->st_hwpackets)) {
+ port->st_hwpackets = rrdset_create_localhost(
+ "Infiniband",
+ port->chart_id_hwpackets,
+ NULL,
+ port->chart_family,
+ "ib.hwpackets",
+ "Hardware Packets Statistics",
+ "packets/s",
+ PLUGIN_PROC_NAME,
+ PLUGIN_PROC_MODULE_INFINIBAND_NAME,
+ port->priority + 5,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_flag_set(port->st_hwpackets, RRDSET_FLAG_DETAIL);
+
+ // VENDORS: Set your selection
+
+ // VENDOR: Mellanox
+ if (strncmp(port->name, "mlx", 3) == 0) {
+ FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
+ }
+
+ // Unknown vendor, should not happen
+ else {
+ error(
+ "Unmanaged vendor for '%s', do_hwpackets should have been set to no. Please report this bug",
+ port->name);
+ port->do_hwpackets = CONFIG_BOOLEAN_NO;
+ }
+ } else
+ rrdset_next(port->st_hwpackets);
+ }
+
+ // Update values to rrd (done by vendor-specific function)
+ (*port->hwcounters_dorrd)(port);
+ }
+ }
+
+ return 0;
+}
diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/.keep
+++ /dev/null
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index cb14e3500..1de2d1d54 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -29,7 +29,6 @@ dist_python_DATA = \
userpythonconfigdir=$(configdir)/python.d
dist_userpythonconfig_DATA = \
- .keep \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
@@ -41,7 +40,9 @@ dist_pythonconfig_DATA = \
$(NULL)
include adaptec_raid/Makefile.inc
+include alarms/Makefile.inc
include am2320/Makefile.inc
+include anomalies/Makefile.inc
include apache/Makefile.inc
include beanstalk/Makefile.inc
include bind_rndc/Makefile.inc
@@ -64,6 +65,7 @@ include go_expvar/Makefile.inc
include haproxy/Makefile.inc
include hddtemp/Makefile.inc
include httpcheck/Makefile.inc
+include hpssa/Makefile.inc
include icecast/Makefile.inc
include ipfs/Makefile.inc
include isc_dhcpd/Makefile.inc
@@ -103,7 +105,6 @@ include squid/Makefile.inc
include tomcat/Makefile.inc
include tor/Makefile.inc
include traefik/Makefile.inc
-include unbound/Makefile.inc
include uwsgi/Makefile.inc
include varnish/Makefile.inc
include w1sensor/Makefile.inc
@@ -142,6 +143,7 @@ dist_third_party_DATA = \
python_modules/third_party/mcrcon.py \
python_modules/third_party/boinc_client.py \
python_modules/third_party/monotonic.py \
+ python_modules/third_party/filelock.py \
$(NULL)
pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
deleted file mode 100644
index 36a4f0ddb..000000000
--- a/collectors/python.d.plugin/Makefile.in
+++ /dev/null
@@ -1,2092 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/python.d.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_python_SCRIPTS) $(dist_bases_DATA) \
- $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \
- $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
- "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
- "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
- "$(DESTDIR)$(third_partydir)" \
- "$(DESTDIR)$(userpythonconfigdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
- $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
- $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
- $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
- $(dist_userpythonconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in \
- $(srcdir)/adaptec_raid/Makefile.inc \
- $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc \
- $(srcdir)/beanstalk/Makefile.inc \
- $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
- $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
- $(srcdir)/couchdb/Makefile.inc \
- $(srcdir)/dns_query_time/Makefile.inc \
- $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dockerd/Makefile.inc \
- $(srcdir)/dovecot/Makefile.inc \
- $(srcdir)/elasticsearch/Makefile.inc \
- $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc \
- $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc \
- $(srcdir)/freeradius/Makefile.inc \
- $(srcdir)/gearman/Makefile.inc \
- $(srcdir)/go_expvar/Makefile.inc \
- $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
- $(srcdir)/httpcheck/Makefile.inc \
- $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
- $(srcdir)/isc_dhcpd/Makefile.inc \
- $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
- $(srcdir)/megacli/Makefile.inc \
- $(srcdir)/memcached/Makefile.inc \
- $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
- $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
- $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \
- $(srcdir)/ntpd/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc \
- $(srcdir)/openldap/Makefile.inc \
- $(srcdir)/oracledb/Makefile.inc \
- $(srcdir)/ovpn_status_log/Makefile.inc \
- $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \
- $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
- $(srcdir)/powerdns/Makefile.inc \
- $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
- $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
- $(srcdir)/rethinkdbs/Makefile.inc \
- $(srcdir)/retroshare/Makefile.inc \
- $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc \
- $(srcdir)/sensors/Makefile.inc \
- $(srcdir)/smartd_log/Makefile.inc \
- $(srcdir)/spigotmc/Makefile.inc \
- $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
- $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \
- $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \
- $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \
- $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \
- $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python.d.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_libconfig_DATA = \
- python.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- python.d.plugin \
- $(NULL)
-
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
- adaptec_raid/README.md adaptec_raid/Makefile.inc \
- am2320/README.md am2320/Makefile.inc apache/README.md \
- apache/Makefile.inc beanstalk/README.md beanstalk/Makefile.inc \
- bind_rndc/README.md bind_rndc/Makefile.inc boinc/README.md \
- boinc/Makefile.inc ceph/README.md ceph/Makefile.inc \
- chrony/README.md chrony/Makefile.inc couchdb/README.md \
- couchdb/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
- dns_query_time/README.md dns_query_time/Makefile.inc \
- dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
- dovecot/Makefile.inc elasticsearch/README.md \
- elasticsearch/Makefile.inc energid/README.md \
- energid/Makefile.inc example/README.md example/Makefile.inc \
- exim/README.md exim/Makefile.inc fail2ban/README.md \
- fail2ban/Makefile.inc freeradius/README.md \
- freeradius/Makefile.inc gearman/README.md gearman/Makefile.inc \
- go_expvar/README.md go_expvar/Makefile.inc haproxy/README.md \
- haproxy/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \
- httpcheck/README.md httpcheck/Makefile.inc icecast/README.md \
- icecast/Makefile.inc ipfs/README.md ipfs/Makefile.inc \
- isc_dhcpd/README.md isc_dhcpd/Makefile.inc litespeed/README.md \
- litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
- megacli/README.md megacli/Makefile.inc memcached/README.md \
- memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
- monit/README.md monit/Makefile.inc mysql/README.md \
- mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
- nginx_plus/README.md nginx_plus/Makefile.inc \
- nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \
- nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
- ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
- openldap/README.md openldap/Makefile.inc oracledb/README.md \
- oracledb/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \
- portcheck/README.md portcheck/Makefile.inc postfix/README.md \
- postfix/Makefile.inc postgres/README.md postgres/Makefile.inc \
- powerdns/README.md powerdns/Makefile.inc proxysql/README.md \
- proxysql/Makefile.inc puppet/README.md puppet/Makefile.inc \
- rabbitmq/README.md rabbitmq/Makefile.inc redis/README.md \
- redis/Makefile.inc rethinkdbs/README.md \
- rethinkdbs/Makefile.inc retroshare/README.md \
- retroshare/Makefile.inc riakkv/README.md riakkv/Makefile.inc \
- samba/README.md samba/Makefile.inc sensors/README.md \
- sensors/Makefile.inc smartd_log/README.md \
- smartd_log/Makefile.inc spigotmc/README.md \
- spigotmc/Makefile.inc springboot/README.md \
- springboot/Makefile.inc squid/README.md squid/Makefile.inc \
- tomcat/README.md tomcat/Makefile.inc tor/README.md \
- tor/Makefile.inc traefik/README.md traefik/Makefile.inc \
- unbound/README.md unbound/Makefile.inc uwsgi/README.md \
- uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \
- w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \
- web_log/Makefile.inc
-dist_python_SCRIPTS = \
- $(NULL)
-
-
-# install these files
-dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
- am2320/am2320.chart.py apache/apache.chart.py \
- beanstalk/beanstalk.chart.py bind_rndc/bind_rndc.chart.py \
- boinc/boinc.chart.py ceph/ceph.chart.py chrony/chrony.chart.py \
- couchdb/couchdb.chart.py dnsdist/dnsdist.chart.py \
- dns_query_time/dns_query_time.chart.py \
- dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
- elasticsearch/elasticsearch.chart.py energid/energid.chart.py \
- example/example.chart.py exim/exim.chart.py \
- fail2ban/fail2ban.chart.py freeradius/freeradius.chart.py \
- gearman/gearman.chart.py go_expvar/go_expvar.chart.py \
- haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
- httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
- ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
- litespeed/litespeed.chart.py logind/logind.chart.py \
- megacli/megacli.chart.py memcached/memcached.chart.py \
- mongodb/mongodb.chart.py monit/monit.chart.py \
- mysql/mysql.chart.py nginx/nginx.chart.py \
- nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \
- nsd/nsd.chart.py ntpd/ntpd.chart.py \
- ovpn_status_log/ovpn_status_log.chart.py \
- openldap/openldap.chart.py oracledb/oracledb.chart.py \
- phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \
- postfix/postfix.chart.py postgres/postgres.chart.py \
- powerdns/powerdns.chart.py proxysql/proxysql.chart.py \
- puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \
- redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \
- retroshare/retroshare.chart.py riakkv/riakkv.chart.py \
- samba/samba.chart.py sensors/sensors.chart.py \
- smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \
- springboot/springboot.chart.py squid/squid.chart.py \
- tomcat/tomcat.chart.py tor/tor.chart.py \
- traefik/traefik.chart.py unbound/unbound.chart.py \
- uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
- w1sensor/w1sensor.chart.py web_log/web_log.chart.py
-userpythonconfigdir = $(configdir)/python.d
-dist_userpythonconfig_DATA = \
- .keep \
- $(NULL)
-
-pythonconfigdir = $(libconfigdir)/python.d
-dist_pythonconfig_DATA = $(NULL) adaptec_raid/adaptec_raid.conf \
- am2320/am2320.conf apache/apache.conf beanstalk/beanstalk.conf \
- bind_rndc/bind_rndc.conf boinc/boinc.conf ceph/ceph.conf \
- chrony/chrony.conf couchdb/couchdb.conf dnsdist/dnsdist.conf \
- dns_query_time/dns_query_time.conf dockerd/dockerd.conf \
- dovecot/dovecot.conf elasticsearch/elasticsearch.conf \
- energid/energid.conf example/example.conf exim/exim.conf \
- fail2ban/fail2ban.conf freeradius/freeradius.conf \
- gearman/gearman.conf go_expvar/go_expvar.conf \
- haproxy/haproxy.conf hddtemp/hddtemp.conf \
- httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
- isc_dhcpd/isc_dhcpd.conf litespeed/litespeed.conf \
- logind/logind.conf megacli/megacli.conf \
- memcached/memcached.conf mongodb/mongodb.conf monit/monit.conf \
- mysql/mysql.conf nginx/nginx.conf nginx_plus/nginx_plus.conf \
- nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \
- ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \
- oracledb/oracledb.conf phpfpm/phpfpm.conf \
- portcheck/portcheck.conf postfix/postfix.conf \
- postgres/postgres.conf powerdns/powerdns.conf \
- proxysql/proxysql.conf puppet/puppet.conf \
- rabbitmq/rabbitmq.conf redis/redis.conf \
- rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
- riakkv/riakkv.conf samba/samba.conf sensors/sensors.conf \
- smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
- springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
- tor/tor.conf traefik/traefik.conf unbound/unbound.conf \
- uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \
- web_log/web_log.conf
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/gearman/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/am2320/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/energid/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/gearman/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/oracledb/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/riakkv/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_libconfigDATA: $(dist_libconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_libconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userpythonconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
- install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
- install-dist_third_partyDATA install-dist_userpythonconfigDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-exec-local install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am \
- uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonconfigDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA \
- uninstall-dist_userpythonconfigDATA
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userpythonconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index f38ab6783..a05bc81dd 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "python.d.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md
+-->
+
# python.d.plugin
`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
@@ -62,6 +67,8 @@ Depending on where Netdata was installed, execute one of the following commands
Where `[module]` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
+**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
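+
+For example, with a typical install layout (adjust the paths if yours differ):
+
+```bash
+# become the netdata user, then run the example collector in debug mode
+# without stopping the copy already being run by Netdata
+sudo su -s /bin/bash netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```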
+
## How to write a new module
Writing a new python module is simple. You just need to remember to include 5 major things:
@@ -74,7 +81,27 @@ Writing new python module is simple. You just need to remember to include 5 majo
If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
-For a quick start, you can look at the [example plugin](example/example.chart.py).
+For a quick start, you can look at the [example
+plugin](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/example/example.chart.py).
+
+**Note**: If you are working 'locally' on a new collector and would like to run it in an already installed and running Netdata (as opposed to having to install Netdata from source again with your new changes), you can copy the relevant file to where Netdata expects it and then either run `sudo service netdata restart` so it is picked up and used by Netdata, or run the updated collector in debug mode by following a process like the one below (this assumes you have [installed Netdata from a GitHub fork](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/manual) you have made to do your development on).
+
+```bash
+# clone your fork (done once at the start but shown here for clarity)
+#git clone --branch my-example-collector https://github.com/mygithubusername/netdata.git --depth=100
+# go into your netdata source folder
+cd netdata
+# git pull your latest changes (assuming you built from a fork you are using to develop on)
+git pull
+# instead of running the installer we can just copy over the updated collector files
+#sudo ./netdata-installer.sh --dont-wait
+# copy over the file you have updated locally (pretending we are working on the 'example' collector)
+sudo cp collectors/python.d.plugin/example/example.chart.py /usr/libexec/netdata/python.d/
+# become user netdata
+sudo su -s /bin/bash netdata
+# run your updated collector in debug mode to see if it works without having to reinstall netdata
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```
### Global variables `ORDER` and `CHART`
@@ -197,10 +224,16 @@ For additional security it uses python `subprocess.Popen` (without `shell=True`
_Examples: `apache`, `nginx`, `tomcat`_
+_Multiple Endpoints (URLs) Examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler),
+[`elasticsearch`](/collectors/python.d.plugin/elasticsearch/README.md) (threaded)_
+
+
_Variables from config file_: `url`, `user`, `pass`.
If data is grabbed by accessing the service via the HTTP protocol, this class can be used. It can handle HTTP Basic Auth when specified with `user` and `pass` credentials.
+Please note that the config file can use different variables according to the specification of each module.
+
`_get_raw_data` returns a list of utf-8 decoded strings (lines).
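+
+As a minimal, hypothetical sketch (the module and endpoint names are illustrative, following the same pattern as the `alarms` collector further below):
+
+```python
+# a bare-bones UrlService-based module (illustrative names throughout)
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = ['requests']
+CHARTS = {
+    'requests': {
+        'options': [None, 'Requests', 'requests/s', 'example', 'example.requests', 'line'],
+        'lines': [['requests', None, 'absolute']],
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # `url`, `user` and `pass` come from the module's .conf file
+        self.url = self.configuration.get('url', 'http://127.0.0.1/status')
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if raw is None:
+            return None
+        # assumes the endpoint returns JSON like {"requests": 42}
+        return {'requests': loads(raw).get('requests')}
+```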
### SocketService
@@ -230,5 +263,6 @@ At minimum, to be buildable and testable, the PR needs to include:
- A makefile for the plugin at `collectors/python.d.plugin/<module_dir>/Makefile.inc`. Check an existing plugin for what this should look like.
- A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically).
- Optionally, chart information in `web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+- Optionally, some default alarm configurations for your collector in `health/health.d/<module_name>.conf` and a line adding `<module_name>.conf` in `health/Makefile.am`.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index 127d595b2..d35ccecbc 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -1,12 +1,24 @@
-# adaptec raid
+<!--
+title: "Adaptec RAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md
+sidebar_label: "Adaptec RAID"
+-->
-Module collects logical and physical devices health metrics.
+# Adaptec RAID controller monitoring with Netdata
-**Requirements:**
+Collects logical and physical device metrics.
-- `arcconf` program
-- `sudo` program
-- `netdata` user needs to be able to sudo the `arcconf` program without password
+## Requirements
+
+The module uses `arcconf`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
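+
+A quick way to verify the entry (using the same placeholder path) is:
+
+```bash
+# must complete without prompting for a password
+sudo -u netdata sudo -n /path/to/arcconf
+```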
To grab stats it executes:
@@ -23,27 +35,23 @@ It produces:
4. **Physical Device Temperature**
-## prerequisite
+## Configuration
-This module uses `arcconf` which can only be executed by root. It uses
-`sudo` and assumes that it is configured such that the `netdata` user can
-execute `arcconf` as root without password.
+**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`.
-Add to `sudoers`:
-
-```
-netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```yaml
+adaptec_raid: yes
```
-## configuration
-
- **adaptec_raid** is disabled by default. Should be explicitly enabled in `python.d.conf`.
+Edit the `python.d/adaptec_raid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-```yaml
-adaptec_raid: yes
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/adaptec_raid.conf
```
-### Screenshot:
+
![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
index 3fcb5fda8..564c2ce87 100644
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -5,13 +5,11 @@
import re
-
from copy import deepcopy
from bases.FrameworkServices.ExecutableService import ExecutableService
from bases.collection import find_binary
-
disabled_by_default = True
update_every = 5
diff --git a/collectors/charts.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/alarms/Makefile.inc
index 4b360eae0..c2de11724 100644
--- a/collectors/charts.d.plugin/apache/Makefile.inc
+++ b/collectors/python.d.plugin/alarms/Makefile.inc
@@ -5,9 +5,9 @@
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
-dist_charts_DATA += apache/apache.chart.sh
-dist_chartsconfig_DATA += apache/apache.conf
+dist_python_DATA += alarms/alarms.chart.py
+dist_pythonconfig_DATA += alarms/alarms.conf
# do not install these files, but include them in the distribution
-dist_noinst_DATA += apache/README.md apache/Makefile.inc
+dist_noinst_DATA += alarms/README.md alarms/Makefile.inc
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
new file mode 100644
index 000000000..ea96061cc
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Alarms"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md
+-->
+
+# Alarms - graphing Netdata alarm states over time
+
+This collector creates an 'Alarms' menu with one line plot showing alarm states over time. Alarm states are mapped to integer values according to the default mapping below. Any alarm status types not in this mapping will be ignored (note: the mapping can be changed by editing `status_map` in the `alarms.conf` file). If you would like to learn more about the different alarm statuses, check out the docs [here](https://learn.netdata.cloud/docs/agent/health/reference#alarm-statuses).
+
+```
+{
+ 'CLEAR': 0,
+ 'WARNING': 1,
+ 'CRITICAL': 2
+}
+```
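+
+In code terms, each alarm's status string is simply looked up in this mapping, and statuses missing from the map are skipped (a sketch):
+
+```python
+status_map = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+status_map.get('WARNING')      # -> 1
+status_map.get('UNDEFINED')    # -> None, so the alarm is ignored
+```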
+
+## Charts
+
+Below is an example of the chart produced when running `stress-ng --all 2` for a few minutes. You can see the various warning and critical alarms raised.
+
+![alarms collector](https://user-images.githubusercontent.com/1153921/101641493-0b086a80-39ef-11eb-9f55-0713e5dfb19f.png)
+
+## Configuration
+
+Enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `alarms: no` to `alarms: yes`
+sudo systemctl restart netdata
+```
+
+If needed, edit the `python.d/alarms.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/alarms.conf
+```
+
+The `alarms` specific part of the `alarms.conf` file should look like this:
+
+```yaml
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+```
+
+By default, it pulls all alarms at each time step from the Netdata REST API at `http://127.0.0.1:19999/api/v1/alarms?all`.
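+
+You can inspect the raw payload the collector works from by querying the same endpoint directly:
+
+```bash
+curl 'http://127.0.0.1:19999/api/v1/alarms?all'
+```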
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Falarms%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
new file mode 100644
index 000000000..973a1f382
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.chart.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Description: alarms netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 10
+disabled_by_default = True
+
+
+def charts_template(sm):
+ order = [
+ 'alarms',
+ ]
+
+ mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
+ charts = {
+ 'alarms': {
+ 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'alarms', 'alarms.status', 'line'],
+ 'lines': [],
+ 'variables': [
+ ['alarms_num'],
+ ]
+ }
+ }
+ return order, charts
+
+
+DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+
+DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
+ self.order, self.definitions = charts_template(self.sm)
+ self.url = self.configuration.get('url', DEFAULT_URL)
+ self.collected_alarms = set()
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ alarms = raw_data.get('alarms', {})
+
+ data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
+ self.update_charts(alarms, data)
+ data['alarms_num'] = len(data)
+
+ return data
+
+ def update_charts(self, alarms, data):
+ if not self.charts:
+ return
+
+ for a in data:
+ if a not in self.collected_alarms:
+ self.collected_alarms.add(a)
+ self.charts['alarms'].add_dimension([a, a, 'absolute', '1', '1'])
+
+ for a in list(self.collected_alarms):
+ if a not in alarms:
+ self.collected_alarms.remove(a)
+ self.charts['alarms'].del_dimension(a, hide=False)
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
new file mode 100644
index 000000000..fd7780c59
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.conf
@@ -0,0 +1,50 @@
+# netdata python.d.plugin configuration for alarms
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
index 709575221..14ddaa735 100644
--- a/collectors/python.d.plugin/am2320/README.md
+++ b/collectors/python.d.plugin/am2320/README.md
@@ -1,7 +1,14 @@
-# AM2320
-This module will display a graph of the temperture and humity from a AM2320 sensor.
+<!--
+title: "AM2320 sensor monitoring with netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md
+sidebar_label: "AM2320"
+-->
-**Requirements:**
+# AM2320 sensor monitoring with Netdata
+
+Displays a graph of the temperature and humidity from an AM2320 sensor.
+
+## Requirements
- Adafruit Circuit Python AM2320 library
- Adafruit AM2320 I2C sensor
- Python 3 (Adafruit libraries are not Python 2.x compatible)
@@ -11,12 +18,20 @@ It produces the following charts:
1. **Temperature**
2. **Humidity**
-## configuration
+## Configuration
+
+Edit the `python.d/am2320.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Raspbery Pi Instructions:
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/am2320.conf
+```
+
+Raspberry Pi Instructions:
Hardware install:
-Connect the am2320 to the Raspbery Pi I2C pins
+Connect the am2320 to the Raspberry Pi I2C pins
Raspberry Pi 3B/4 Pins:
@@ -25,7 +40,7 @@ Raspberry Pi 3B/4 Pins:
- Board GND (pin 6) to sensor GND (pin 3)
- Board SCL (pin 5) to sensor SCL (pin 4)
-You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesnt hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
+You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
Software install:
- `sudo pip3 install adafruit-circuitpython-am2320`
@@ -35,3 +50,5 @@ Software install:
- save the file.
- restart the netdata service.
- check the dashboard.
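+
+As a quick standalone check that the sensor and library are working (illustrative; it reads the same two values the collector uses):
+
+```python
+# read one temperature/humidity sample from the AM2320 over I2C
+import board
+import busio
+import adafruit_am2320
+
+i2c = busio.I2C(board.SCL, board.SDA)
+sensor = adafruit_am2320.AM2320(i2c)
+print(sensor.temperature, sensor.relative_humidity)
+```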
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fam2320%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/am2320/am2320.chart.py b/collectors/python.d.plugin/am2320/am2320.chart.py
index c15e16eee..8e66544bd 100644
--- a/collectors/python.d.plugin/am2320/am2320.chart.py
+++ b/collectors/python.d.plugin/am2320/am2320.chart.py
@@ -7,14 +7,13 @@ try:
import board
import busio
import adafruit_am2320
+
HAS_AM2320 = True
except ImportError:
HAS_AM2320 = False
-
from bases.FrameworkServices.SimpleService import SimpleService
-
ORDER = [
'temperature',
'humidity',
@@ -60,9 +59,9 @@ class Service(SimpleService):
def get_data(self):
try:
return {
- 'temperature': self.am.temperature,
- 'humidity': self.am.relative_humidity,
- }
+ 'temperature': self.am.temperature,
+ 'humidity': self.am.relative_humidity,
+ }
except (OSError, RuntimeError) as error:
self.error(error)
diff --git a/collectors/python.d.plugin/am2320/am2320.conf b/collectors/python.d.plugin/am2320/am2320.conf
index 982f5cd0a..c6b9885fc 100644
--- a/collectors/python.d.plugin/am2320/am2320.conf
+++ b/collectors/python.d.plugin/am2320/am2320.conf
@@ -1,4 +1,4 @@
-# netdata python.d.plugin configuration for am2320 temperture/humity sensor
+# netdata python.d.plugin configuration for am2320 temperature/humidity sensor
#
# This file is in YaML format. Generally the format is:
#
diff --git a/collectors/charts.d.plugin/cpufreq/Makefile.inc b/collectors/python.d.plugin/anomalies/Makefile.inc
index 682379133..94937b36a 100644
--- a/collectors/charts.d.plugin/cpufreq/Makefile.inc
+++ b/collectors/python.d.plugin/anomalies/Makefile.inc
@@ -5,9 +5,9 @@
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
-dist_charts_DATA += cpufreq/cpufreq.chart.sh
-dist_chartsconfig_DATA += cpufreq/cpufreq.conf
+dist_python_DATA += anomalies/anomalies.chart.py
+dist_pythonconfig_DATA += anomalies/anomalies.conf
# do not install these files, but include them in the distribution
-dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+dist_noinst_DATA += anomalies/README.md anomalies/Makefile.inc
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
new file mode 100644
index 000000000..862f4f345
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -0,0 +1,231 @@
+<!--
+title: "Anomaly detection with Netdata"
+description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md
+sidebar_label: "Anomalies"
+-->
+
+# Anomaly detection with Netdata
+
+This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
+
+Rather than just _collecting_ data, this collector also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
+
+> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
+
+## Charts
+
+Two charts are produced:
+
+- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
+- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of whether the latest observed data is considered anomalous based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model). A minimal sketch of both calls follows below.
+
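+For orientation, here is a minimal standalone sketch of the two PyOD calls named above (illustrative only, not the collector's own code; the model choice, random data, and shapes here are made up):
+
+```python
+import numpy as np
+from pyod.models.hbos import HBOS
+
+np.random.seed(42)
+clf = HBOS(contamination=0.001)
+clf.fit(np.random.random((1000, 6)))       # learn what 'normal' looks like
+x_latest = np.random.random((1, 6))        # latest feature vector
+prob = clf.predict_proba(x_latest)[-1][1]  # anomaly probability, 0 to 1
+flag = clf.predict(x_latest)[-1]           # 1 if flagged anomalous, else 0
+```
+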
+Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range; one or two might randomly jump or drift outside of this range every now and then and show up as anomalies on the anomaly chart.
+
+![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
+
+If we then go on to the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now, so they could be a good place to start your troubleshooting.
+
+![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
+
+Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
+
+![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
+
+## Requirements
+
+- This collector will only work with Python 3 and requires the packages below be installed.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages for the netdata user
+pip3 install --user netdata-pandas==0.0.32 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+## Configuration
+
+Install the Python requirements above, enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `anomalies: no` to `anomalies: yes`
+sudo systemctl restart netdata
+```
+
+The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation over time to set it optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started that try to balance the flexibility and power of the ML models against the goal of being as cheap as possible in terms of cost to the node's resources.
+
+_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector has been running for a while and you have a feel for its performance on your node._
+
+Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
+directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/anomalies.conf
+```
+
+The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does.
+
+```yaml
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+  # How often to re-train the model (assuming update_every=1, train_every_n=1800 means (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for, some examples below to show how.
+  # The example below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+  # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+```
+
+## Custom models
+
+In the `anomalies.conf` file you can also define some "custom models" which you can use to group one or more metrics into a single model, much as is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but perhaps are related to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
+
+To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
+
+`name` is a name you give your custom model; this is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear.
+
+```yaml
+custom_models:
+ # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
+ - name: 'user_netdata'
+ dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+ # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
+ - name: 'apps_python_d_plugin'
+ dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+custom_models_normalize: false
+```
+
+## Troubleshooting
+
+To see any relevant log messages, you can use a command like the one below.
+
+```bash
+grep 'anomalies' /var/log/netdata/error.log
+```
+
+If you would like more detail, you can log in as the `netdata` user and run the collector in debug mode:
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# run the collector in debug mode, using the `nolock` option if netdata is already running the collector itself.
+/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
+```
+
+## Deepdive tutorial
+
+If you would like to go deeper on what exactly the anomalies collector is doing under the hood then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo, where you can play around with some data from our demo servers (or your own, if it's accessible to you) and work through the calculations step by step.
+
+(Note: as it's a Jupyter Notebook, it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb).)
+
+## Notes
+
+- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://learn.netdata.cloud/docs/agent/web/api) to get the required data for each chart.
+- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
+- It may take a few hours or so (depending on your choice of `train_n_secs`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
+- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)). A short sketch of this preprocessing follows this list.
+- You can see how long model training is taking by looking in the collector's logs (`grep 'anomalies' /var/log/netdata/error.log | grep 'training'`); you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
+ - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
+ - `after` and `before` here refer to the start and end of the training data used to train the models.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the typical performance characteristics we saw from running this collector (with defaults) were:
+ - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
+ - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training.
+  - About 150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`.
+- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
+- Some models like `iforest` can be comparatively expensive (on the same n1-standard-2 system above: ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict), so if you would like to use it you might be advised to set a relatively high `update_every`, maybe 10, 15 or 30, in `anomalies.conf`.
+- Setting a higher `train_every_n` and `update_every` is an easy way to devote fewer node resources to anomaly detection. Specifying fewer charts and a lower `train_n_secs` will also help reduce resources, at the expense of covering fewer charts and maybe a noisier model if you set `train_n_secs` too small for how your node tends to behave.
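+
+As a rough illustration of the `lags_n`, `smooth_n`, and `diffs_n` preprocessing described in the notes above, here is a simplified single-dimension numpy sketch (mirroring, not reproducing, the collector's `make_features()`), using `diffs_n=1`, `smooth_n=3` and `lags_n=2`:
+
+```python
+import numpy as np
+
+x = np.array([1., 2., 4., 7., 11., 16., 22.])
+x = np.diff(x, n=1)                             # diffs_n=1: model changes, not levels
+x = np.convolve(x, np.ones(3) / 3, 'valid')     # smooth_n=3: rolling mean
+X = np.stack([x[2:], x[1:-1], x[:-2]], axis=1)  # lags_n=2: current value plus 2 lags
+print(X)  # each row is one 'feature vector' for the model to score
+```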
+
+## Useful links and further reading
+
+- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
+- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
+- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
+- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
+- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
+- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
+- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fanomalies%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py
new file mode 100644
index 000000000..97dbb1d1e
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+# Description: anomalies netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import time
+from datetime import datetime
+import re
+import warnings
+
+import requests
+import numpy as np
+import pandas as pd
+from netdata_pandas.data import get_data, get_allmetrics_async
+from pyod.models.hbos import HBOS
+from pyod.models.pca import PCA
+from pyod.models.loda import LODA
+from pyod.models.iforest import IForest
+from pyod.models.cblof import CBLOF
+from pyod.models.feature_bagging import FeatureBagging
+from pyod.models.copod import COPOD
+from sklearn.preprocessing import MinMaxScaler
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# ignore some sklearn/numpy warnings that are ok
+warnings.filterwarnings('ignore', r'All-NaN slice encountered')
+warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
+warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
+warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
+
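+# this module is disabled by default; enable it in python.d.conf before it will run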
+disabled_by_default = True
+
+ORDER = ['probability', 'anomaly']
+
+CHARTS = {
+ 'probability': {
+ 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
+ 'lines': []
+ },
+ 'anomaly': {
+ 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
+ 'lines': []
+ },
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.basic_init()
+ self.charts_init()
+ self.custom_models_init()
+ self.model_params_init()
+ self.models_init()
+
+ def check(self):
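+        # one trial pull of the allmetrics endpoint; an exception here makes the check fail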
+ _ = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ return True
+
+ def basic_init(self):
+ """Perform some basic initialization.
+ """
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.protocol = self.configuration.get('protocol', 'http')
+ self.host = self.configuration.get('host', '127.0.0.1:19999')
+ self.username = self.configuration.get('username', None)
+ self.password = self.configuration.get('password', None)
+ self.fitted_at = {}
+ self.df_allmetrics = pd.DataFrame()
+ self.data_latest = {}
+ self.last_train_at = 0
+ self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
+
+ def charts_init(self):
+ """Do some initialisation of charts in scope related variables.
+ """
+        self.charts_regex = re.compile(self.configuration.get('charts_regex', 'None'))
+ self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts').json().get('charts', {}).keys())]
+ self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ if len(self.charts_to_exclude) > 0:
+ self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
+
+ def custom_models_init(self):
+ """Perform initialization steps related to custom models.
+ """
+ self.custom_models = self.configuration.get('custom_models', None)
+ self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
+ if self.custom_models:
+ self.custom_models_names = [model['name'] for model in self.custom_models]
+ self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
+ self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
+ self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
+ self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
+ self.custom_models_host_charts_dict = {}
+ for host in self.custom_models_hosts:
+ self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
+ self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
+ self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
+ self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ for host in self.custom_models_host_charts_dict:
+ if host not in self.host_charts_dict:
+ self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
+ else:
+ for chart in self.custom_models_host_charts_dict[host]:
+ if chart not in self.host_charts_dict[host]:
+                        self.host_charts_dict[host].append(chart)
+ else:
+ self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
+
+ def model_params_init(self):
+ """Model parameters initialisation.
+ """
+ self.train_max_n = self.configuration.get('train_max_n', 100000)
+ self.train_n_secs = self.configuration.get('train_n_secs', 14400)
+ self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
+ self.train_every_n = self.configuration.get('train_every_n', 1800)
+ self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
+ self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
+ self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
+ self.contamination = self.configuration.get('contamination', 0.001)
+ self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
+ self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
+ self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
+
+ def models_init(self):
+ """Models initialisation.
+ """
+ self.model = self.configuration.get('model', 'pca')
+ if self.model == 'pca':
+ self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'loda':
+ self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'iforest':
+ self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'cblof':
+ self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'feature_bagging':
+ self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'copod':
+ self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'hbos':
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ else:
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
+
+ def validate_charts(self, name, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ for dim in data:
+ if dim not in self.charts[name]:
+ self.charts[name].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ def add_custom_models_dims(self, df):
+ """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
+
+ :param df <pd.DataFrame>: dataframe to append new renamed columns to.
+ :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
+ """
+ df_custom = df[self.custom_models_dims].copy()
+ df_custom.columns = self.custom_models_dims_renamed
+ df = df.join(df_custom)
+
+ return df
+
+ def make_features(self, arr, train=False, model=None):
+ """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
+
+ :param arr <np.ndarray>: numpy array we want to make features from.
+ :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
+ :param model <str>: model to make features for.
+ :return: <np.ndarray> transformed numpy array.
+ """
+
+ def lag(arr, n):
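+            # shift arr down by n rows, padding the top with NaNs (NaN rows are dropped later)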
+ res = np.empty_like(arr)
+ res[:n] = np.nan
+ res[n:] = arr[:-n]
+
+ return res
+
+ arr = np.nan_to_num(arr)
+
+ diffs_n = self.diffs_n[model]
+ smooth_n = self.smooth_n[model]
+ lags_n = self.lags_n[model]
+
+ if self.custom_models_normalize and model in self.custom_models_names:
+ if train:
+ arr = self.custom_model_scalers[model].fit_transform(arr)
+ else:
+ arr = self.custom_model_scalers[model].transform(arr)
+
+ if diffs_n > 0:
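+            # difference the data diffs_n times so the models see changes rather than raw levels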
+ arr = np.diff(arr, diffs_n, axis=0)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if smooth_n > 1:
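+            # rolling mean over a window of smooth_n rows, computed via the cumulative sum trick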
+ arr = np.cumsum(arr, axis=0, dtype=float)
+ arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
+ arr = arr[smooth_n - 1:] / smooth_n
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if lags_n > 0:
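+            # widen each row with lags_n lagged copies of every column to form the feature vector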
+ arr_orig = np.copy(arr)
+ for lag_n in range(1, lags_n + 1):
+ arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if train:
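+            # randomly sample down to train_max_n rows to cap the compute cost of training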
+ if len(arr) > self.train_max_n:
+ arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
+
+ arr = np.nan_to_num(arr)
+
+ return arr
+
+ def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
+ """Pull required training data and train a model for each specified model.
+
+ :param models_to_train <list>: list of models to train on.
+ :param train_data_after <int>: integer timestamp for start of train data.
+ :param train_data_before <int>: integer timestamp for end of train data.
+ """
+ now = datetime.now().timestamp()
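+        # train on an explicit window if configured, otherwise on the last train_n_secs (minus offset_n_secs)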
+ if train_data_after > 0 and train_data_before > 0:
+ before = train_data_before
+ after = train_data_after
+ else:
+ before = int(now) - self.offset_n_secs
+ after = before - self.train_n_secs
+
+ # get training data
+ df_train = get_data(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
+ sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password
+ ).ffill()
+ if self.custom_models:
+ df_train = self.add_custom_models_dims(df_train)
+
+ # train model
+ self.try_fit(df_train, models_to_train=models_to_train)
+ self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
+ self.last_train_at = self.runs_counter
+
+ def try_fit(self, df_train, models_to_train=None):
+ """Try fit each model and try to fallback to a default model if fit fails for any reason.
+
+ :param df_train <pd.DataFrame>: data to train on.
+ :param models_to_train <list>: list of models to train.
+ """
+ if models_to_train is None:
+ models_to_train = list(self.models.keys())
+ self.n_fit_fail, self.n_fit_success = 0, 0
+ for model in models_to_train:
+ X_train = self.make_features(
+ df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
+ train=True, model=model)
+ try:
+ self.models[model].fit(X_train)
+ self.n_fit_success += 1
+ except Exception as e:
+ self.n_fit_fail += 1
+ self.info(e)
+ self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
+ self.models[model] = HBOS(contamination=self.contamination)
+ self.models[model].fit(X_train)
+ self.fitted_at[model] = self.runs_counter
+
+ def predict(self):
+ """Get latest data, make it into a feature vector, and get predictions for each available model.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ # get recent data to predict on
+ df_allmetrics = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ if self.custom_models:
+ df_allmetrics = self.add_custom_models_dims(df_allmetrics)
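+        # keep only as much recent history as the lags/smoothing/diffs preprocessing needs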
+ self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
+
+ # get predictions
+ data_probability, data_anomaly = self.try_predict()
+
+ return data_probability, data_anomaly
+
+ def try_predict(self):
+ """Try make prediction and fall back to last known prediction if fails.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ data_probability, data_anomaly = {}, {}
+ for model in self.fitted_at.keys():
+ model_display_name = self.model_display_names[model]
+ X_model = np.nan_to_num(self.make_features(
+ self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
+ model=model)[-1,:].reshape(1, -1))
+ try:
+ data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
+ data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
+ except Exception:
+ #self.info(e)
+ if model_display_name + '_prob' in self.data_latest:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
+ data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
+ data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
+ else:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
+ continue
+
+ return data_probability, data_anomaly
+
+ def get_data(self):
+
+ # if not all models have been trained then train those we need to
+ if len(self.fitted_at) < len(self.models):
+ self.train(
+ models_to_train=[m for m in self.models if m not in self.fitted_at],
+ train_data_after=self.initial_train_data_after,
+ train_data_before=self.initial_train_data_before)
+ # retrain all models as per schedule from config
+ elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
+ self.train()
+
+ # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
+ if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
+ data = self.data_latest
+ else:
+ data_probability, data_anomaly = self.predict()
+ if self.include_average_prob:
+ data_probability['average_prob'] = np.mean(list(data_probability.values()))
+ data = {**data_probability, **data_anomaly}
+ self.validate_charts('probability', data_probability, divisor=100)
+ self.validate_charts('anomaly', data_anomaly)
+
+ self.data_latest = data
+
+ return data
diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf
new file mode 100644
index 000000000..9950534aa
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.conf
@@ -0,0 +1,181 @@
+# netdata python.d.plugin configuration for anomalies
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 2
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+local:
+ name: 'local'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+  # How often to re-train the model (assuming update_every=1, train_every_n=1800 means (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for, some examples below to show how.
+  # The example below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+  # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+
+# Standalone Custom models example as an additional collector job.
+# custom:
+# name: 'custom'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'user_netdata'
+# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+# - name: 'apps_python_d_plugin'
+# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+# Pull data from some demo nodes for cross node custom models.
+# demos:
+# name: 'demos'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'system.cpu'
+# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+# - name: 'system.ip'
+# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
+# - name: 'system.net'
+# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
+# - name: 'system.io'
+# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
+
+# Example additional job if you want to also pull data from a child streaming to your
+# local parent or even a remote node so long as the Netdata REST API is accessible.
+# mychildnode1:
+# name: 'mychildnode1'
+# host: '127.0.0.1:19999/host/mychildnode1'
+# protocol: 'http'
+# charts_regex: 'system\..*'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
index 8f0ec0c1c..d27525095 100644
--- a/collectors/python.d.plugin/apache/README.md
+++ b/collectors/python.d.plugin/apache/README.md
@@ -1,8 +1,14 @@
-# apache
+<!--
+title: "Apache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/apache/README.md
+sidebar_label: "Apache"
+-->
-This module will monitor one or more Apache servers depending on configuration.
+# Apache monitoring with Netdata
-**Requirements:**
+Monitors one or more Apache servers depending on configuration.
+
+## Requirements
- apache with enabled `mod_status`
@@ -43,11 +49,19 @@ It produces the following charts:
- size_req
-## configuration
+## Configuration
+
+Edit the `python.d/apache.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/apache.conf
+```
Needs only `url` to server's `server-status?auto`
-Here is an example for 2 servers:
+Example for two servers:
```yaml
update_every : 10
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index 655616d07..ceac9ecd5 100644
--- a/collectors/python.d.plugin/apache/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'requests',
'connections',
@@ -38,7 +37,7 @@ CHARTS = {
]},
'bytespersec': {
'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
- 'apache.bytesperreq', 'area'],
+ 'apache.bytespersec', 'area'],
'lines': [
['size_sec', None, 'absolute', 8, 1000 * 100000]
]},
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index c93dfa0d4..24315adb4 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -1,8 +1,14 @@
-# beanstalk
+<!--
+title: "Beanstalk monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md
+sidebar_label: "Beanstalk"
+-->
-Module provides server and tube-level statistics:
+# Beanstalk monitoring with Netdata
-**Requirements:**
+Provides server and tube-level statistics.
+
+## Requirements
- `python-beanstalkc`
@@ -103,7 +109,15 @@ Module provides server and tube-level statistics:
- since
- left
-## configuration
+## Configuration
+
+Edit the `python.d/beanstalk.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/beanstalk.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 9c8319872..396543e5a 100644
--- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -5,6 +5,7 @@
try:
import beanstalkc
+
BEANSTALKC = True
except ImportError:
BEANSTALKC = False
@@ -12,7 +13,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
from bases.loaders import load_yaml
-
ORDER = [
'cpu_usage',
'jobs_rate',
@@ -109,7 +109,7 @@ CHARTS = {
'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
'lines': [
['uptime'],
- ]
+ ]
}
}
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index 021a5d660..2832575dd 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -1,8 +1,14 @@
-# bind_rndc
+<!--
+title: "ISC Bind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md
+sidebar_label: "ISC Bind"
+-->
-Module parses bind dump file to collect real-time performance metrics
+# ISC Bind monitoring with Netdata
-**Requirements:**
+Collects name server summary performance statistics using the `rndc` tool.
+
+## Requirements
- Version of bind must be 9.6 +
- Netdata must have permissions to run `rndc stats`
@@ -49,7 +55,15 @@ It produces:
- Same as Incoming queries
-## configuration
+## Configuration
+
+Edit the `python.d/bind_rndc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/bind_rndc.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index 60f40c2f7..9d6c9fec7 100644
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -4,13 +4,11 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import os
-
from collections import defaultdict
from subprocess import Popen
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
-
+from bases.collection import find_binary
update_every = 30
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 260ae54b6..bd509c900 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -1,13 +1,24 @@
-# boinc
+<!--
+title: "BOINC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md
+sidebar_label: "BOINC"
+-->
-This module monitors task counts for the Berkely Open Infrastructure
-Networking Computing (BOINC) distributed computing client using the same
-RPC interface that the BOINC monitoring GUI does.
+# BOINC monitoring with Netdata
-It provides charts tracking the total number of tasks and active tasks,
-as well as ones tracking each of the possible states for tasks.
+Monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client using the same RPC interface that the BOINC monitoring GUI does.
-## configuration
+It provides charts tracking the total number of tasks and active tasks, as well as ones tracking each of the possible states for tasks.
+
+## Configuration
+
+Edit the `python.d/boinc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/boinc.conf
+```
BOINC requires use of a password to access it's RPC interface. You can
find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
index e10b28cea..a31eda1c2 100644
--- a/collectors/python.d.plugin/boinc/boinc.chart.py
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -6,10 +6,8 @@
import socket
from bases.FrameworkServices.SimpleService import SimpleService
-
from third_party import boinc_client
-
ORDER = [
'tasks',
'states',
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index f5b36e149..5d671f2aa 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -1,8 +1,12 @@
-# ceph
+<!--
+title: "CEPH monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md
+sidebar_label: "CEPH"
+-->
-This module monitors the ceph cluster usage and consumption data of a server.
+# CEPH monitoring with Netdata
-It produces:
+Monitors the ceph cluster usage and consumption data of a server, and produces:
- Cluster statistics (usage, available, latency, objects, read/write rate)
- OSD usage
@@ -12,7 +16,7 @@ It produces:
- Pool read/write rate
- number of objects per pool
-**Requirements:**
+## Requirements
- `rados` python module
- Granting read permissions to ceph group from keyring file
@@ -23,6 +27,14 @@ It produces:
## Configuration
+Edit the `python.d/ceph.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ceph.conf
+```
+
Sample:
```yaml
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index fe9b2b9ab..494eef45d 100644
--- a/collectors/python.d.plugin/ceph/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -5,6 +5,7 @@
try:
import rados
+
CEPH = True
except ImportError:
CEPH = False
@@ -30,6 +31,7 @@ ORDER = [
'pool_read_operations',
'pool_write_operations',
'osd_usage',
+ 'osd_size',
'osd_apply_latency',
'osd_commit_latency'
]
@@ -100,6 +102,10 @@ CHARTS = {
'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
'lines': []
},
+ 'osd_size': {
+ 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
+ 'lines': []
+ },
'osd_apply_latency': {
'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
'lines': []
@@ -119,6 +125,7 @@ class Service(SimpleService):
self.definitions = CHARTS
self.config_file = self.configuration.get('config_file')
self.keyring_file = self.configuration.get('keyring_file')
+ self.rados_id = self.configuration.get('rados_id', 'admin')
def check(self):
"""
@@ -147,7 +154,8 @@ class Service(SimpleService):
return False
try:
self.cluster = rados.Rados(conffile=self.config_file,
- conf=dict(keyring=self.keyring_file))
+ conf=dict(keyring=self.keyring_file),
+ rados_id=self.rados_id)
self.cluster.connect()
except rados.Error as error:
self.error(error)
@@ -161,7 +169,7 @@ class Service(SimpleService):
:return: None
"""
# Pool lines
- for pool in sorted(self._get_df()['pools'], key=lambda x:sorted(x.keys())):
+ for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
self.definitions['pool_usage']['lines'].append([pool['name'],
pool['name'],
'absolute'])
@@ -169,23 +177,26 @@ class Service(SimpleService):
pool['name'],
'absolute'])
self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
pool['name'],
'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
pool['name'],
'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
# OSD lines
- for osd in sorted(self._get_osd_df()['nodes'], key=lambda x:sorted(x.keys())):
+ for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
self.definitions['osd_usage']['lines'].append([osd['name'],
osd['name'],
'absolute'])
+ self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
osd['name'],
'absolute'])
@@ -203,8 +214,10 @@ class Service(SimpleService):
df = self._get_df()
osd_df = self._get_osd_df()
osd_perf = self._get_osd_perf()
+ osd_perf_infos = get_osd_perf_infos(osd_perf)
pool_stats = self._get_osd_pool_stats()
- data.update(self._get_general(osd_perf, pool_stats))
+
+ data.update(self._get_general(osd_perf_infos, pool_stats))
for pool in df['pools']:
data.update(self._get_pool_usage(pool))
data.update(self._get_pool_objects(pool))
@@ -212,14 +225,15 @@ class Service(SimpleService):
data.update(self._get_pool_rw(pool_io))
for osd in osd_df['nodes']:
data.update(self._get_osd_usage(osd))
- for osd_apply_commit in osd_perf['osd_perf_infos']:
+ data.update(self._get_osd_size(osd))
+ for osd_apply_commit in osd_perf_infos:
data.update(self._get_osd_latency(osd_apply_commit))
return data
except (ValueError, AttributeError) as error:
self.error(error)
return None
- def _get_general(self, osd_perf, pool_stats):
+ def _get_general(self, osd_perf_infos, pool_stats):
"""
Get ceph's general usage
:return: dict
@@ -237,7 +251,7 @@ class Service(SimpleService):
write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
- for perf in osd_perf['osd_perf_infos']:
+ for perf in osd_perf_infos:
apply_latency += perf['perf_stats']['apply_latency_ms']
commit_latency += perf['perf_stats']['commit_latency_ms']
@@ -291,6 +305,14 @@ class Service(SimpleService):
return {osd['name']: float(osd['kb_used'])}
@staticmethod
+ def _get_osd_size(osd):
+ """
+ Process raw data into osd dict information to get osd size (kb)
+ :return: A osd dict with osd name's key and size bytes' value
+ """
+ return {'size_{0}'.format(osd['name']): float(osd['kb'])}
+
+ @staticmethod
def _get_osd_latency(osd):
"""
Get ceph osd apply and commit latency
@@ -342,3 +364,11 @@ class Service(SimpleService):
'prefix': 'osd pool stats',
'format': 'json'
}), '')[1].decode('utf-8'))
+
+
+def get_osd_perf_infos(osd_perf):
+ # https://github.com/netdata/netdata/issues/8247
+    # the module uses 'osd_perf_infos' data; it's been moved under 'osdstats' since Ceph v14.2
+ if 'osd_perf_infos' in osd_perf:
+ return osd_perf['osd_perf_infos']
+ return osd_perf['osdstats']['osd_perf_infos']
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 4caabbf6d..81788e866 100644
--- a/collectors/python.d.plugin/ceph/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -64,10 +64,12 @@
# config_file: 'config_file' # Ceph config file.
# keyring_file: 'keyring_file' # Ceph keyring file. netdata user must be added into ceph group
# # and keyring file must be read group permission.
+# rados_id: 'rados username' # ID used to connect to ceph cluster. Allows
+# # creating a read-only key for pulling data vs. admin
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
#
config_file: '/etc/ceph/ceph.conf'
keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-
+rados_id: 'admin'
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
index a45adb333..b1e7ec35c 100644
--- a/collectors/python.d.plugin/chrony/README.md
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -1,8 +1,12 @@
-# chrony
+<!--
+title: "Chrony monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/chrony/README.md
+sidebar_label: "Chrony"
+-->
-This module monitors the precision and statistics of a local chronyd server.
+# Chrony monitoring with Netdata
-It produces:
+Monitors the precision and statistics of a local chronyd server, and produces:
- frequency
- last offset
@@ -13,11 +17,33 @@ It produces:
- skew
- system time
-**Requirements:**
+## Requirements
+
Verify that user Netdata can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf`, `cmdallow`.
+## Enable the collector
+
+The `chrony` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `chrony` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `chrony` collector.
+
## Configuration
+Edit the `python.d/chrony.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/chrony.conf
+```
+
Sample:
```yaml
@@ -29,6 +55,7 @@ local:
command: 'chronyc -n tracking'
```
----
+Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the appropriate method for your
+system, to finish configuring the `chrony` collector.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
index 288970674..896bbdd31 100644
--- a/collectors/python.d.plugin/couchdb/README.md
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -1,6 +1,12 @@
-# couchdb
+<!--
+title: "Apache CouchDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/couchdb/README.md
+sidebar_label: "CouchDB"
+-->
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+# Apache CouchDB monitoring with Netdata
+
+Monitors vital statistics of a local Apache CouchDB 2.x server, including:
- Overall server reads/writes
- HTTP traffic breakdown
@@ -13,6 +19,14 @@ This module monitors vital statistics of a local Apache CouchDB 2.x server, incl
## Configuration
+Edit the `python.d/couchdb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/couchdb.conf
+```
+
Sample for a local server running on port 5984:
```yaml
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 50fe6669f..a395f356c 100644
--- a/collectors/python.d.plugin/couchdb/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -6,8 +6,8 @@
from collections import namedtuple, defaultdict
from json import loads
-from threading import Thread
from socket import gethostbyname, gaierror
+from threading import Thread
try:
from queue import Queue
@@ -16,10 +16,8 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-
update_every = 1
-
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
OVERVIEW_STATS = [
@@ -127,7 +125,7 @@ CHARTS = {
['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
]
@@ -141,13 +139,13 @@ CHARTS = {
['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_3xx', '3xx Redirection',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_4xx', '4xx Client error',
- 'incremental'],
+ 'incremental'],
['couchdb_httpd_status_codes_5xx', '5xx Server error',
- 'incremental']
+ 'incremental']
]
},
'open_files': {
@@ -280,19 +278,19 @@ class Service(UrlService):
if self._get_raw_data(self.url + '/' + db)]
for db in self.dbs:
self.definitions['db_sizes_file']['lines'].append(
- ['db_'+db+'_sizes_file', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_file', db, 'absolute', 1, 1000]
)
self.definitions['db_sizes_external']['lines'].append(
- ['db_'+db+'_sizes_external', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_external', db, 'absolute', 1, 1000]
)
self.definitions['db_sizes_active']['lines'].append(
- ['db_'+db+'_sizes_active', db, 'absolute', 1, 1000]
+ ['db_' + db + '_sizes_active', db, 'absolute', 1, 1000]
)
self.definitions['db_doc_counts']['lines'].append(
- ['db_'+db+'_doc_count', db, 'absolute']
+ ['db_' + db + '_doc_count', db, 'absolute']
)
self.definitions['db_doc_del_counts']['lines'].append(
- ['db_'+db+'_doc_del_count', db, 'absolute']
+ ['db_' + db + '_doc_del_count', db, 'absolute']
)
return UrlService.check(self)
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
index ebf34a3d6..e1fde7471 100644
--- a/collectors/python.d.plugin/dns_query_time/README.md
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -1,6 +1,12 @@
-# dns_query_time
+<!--
+title: "DNS query RTT monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dns_query_time/README.md
+sidebar_label: "DNS query RTT"
+-->
-This module provides DNS query time statistics.
+# DNS query RTT monitoring with Netdata
+
+Measures DNS query round trip time.
**Requirement:**
@@ -8,6 +14,16 @@ This module provides DNS query time statistics.
It produces one aggregate chart or one chart per DNS server, showing the query time.
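The measurement itself is straightforward; a rough sketch of a single probe using the `dnspython` package the module depends on (the domain and server below are examples only):

```python
from time import monotonic

import dns.message
import dns.query

query = dns.message.make_query('example.com', 'A')  # domain is an example
started = monotonic()
dns.query.udp(query, '8.8.8.8', timeout=4)          # server is an example
print('query took {:.1f} ms'.format((monotonic() - started) * 1000))
```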
+## Configuration
+
+Edit the `python.d/dns_query_time.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dns_query_time.conf
+```
+
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index 7fe860314..7e1cb32b3 100644
--- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -11,6 +11,7 @@ try:
import dns.message
import dns.query
import dns.name
+
DNS_PYTHON = True
except ImportError:
DNS_PYTHON = False
@@ -22,7 +23,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 5
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
index 4310fe28a..7c279efaf 100644
--- a/collectors/python.d.plugin/dnsdist/README.md
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -1,8 +1,12 @@
-# dnsdist
+<!--
+title: "PowerDNS dnsdist monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dnsdist/README.md
+sidebar_label: "PowerDNS dnsdist"
+-->
-This module monitors dnsdist performance and health metrics.
+# PowerDNS dnsdist monitoring with Netdata
-The module draws the following charts:
+Collects load-balancer performance and health metrics, and draws the following charts:
1. **Response latency**
@@ -47,6 +51,14 @@ The module draws the following charts:
## Configuration
+Edit the `python.d/dnsdist.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dnsdist.conf
+```
+
```yaml
localhost:
name : 'local'
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
index d60858659..7e947923f 100644
--- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -5,7 +5,6 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'queries',
'queries_dropped',
@@ -21,7 +20,6 @@ ORDER = [
'query_latency_avg'
]
-
CHARTS = {
'queries': {
'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
@@ -107,7 +105,7 @@ CHARTS = {
]
},
'query_latency_avg': {
- 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency',
+ 'options': [None, 'Average latency for the last N queries', 'microseconds', 'latency',
'dnsdist.query_latency_avg', 'line'],
'lines': [
['latency-avg100', '100', 'absolute'],
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
index ec69262fa..178bae2cc 100644
--- a/collectors/python.d.plugin/dockerd/README.md
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -1,6 +1,12 @@
-# dockerd
+<!--
+title: "Docker Engine monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dockerd/README.md
+sidebar_label: "Docker Engine"
+-->
-Module monitor docker health metrics.
+# Docker Engine monitoring with Netdata
+
+Collects Docker container health metrics.
**Requirement:**
@@ -20,7 +26,15 @@ Following charts are drawn:
- count
-## configuration
+## Configuration
+
+Edit the `python.d/dockerd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dockerd.conf
+```
```yaml
update_every : 1
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
index 8bd45df9e..bd9640bbf 100644
--- a/collectors/python.d.plugin/dockerd/dockerd.chart.py
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -4,14 +4,14 @@
try:
import docker
+
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
-from bases.FrameworkServices.SimpleService import SimpleService
-
from distutils.version import StrictVersion
+from bases.FrameworkServices.SimpleService import SimpleService
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
@@ -44,7 +44,6 @@ CHARTS = {
}
}
-
MIN_REQUIRED_VERSION = '3.2.0'
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index 6048f1a63..730b64257 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -1,6 +1,12 @@
-# dovecot
+<!--
+title: "Dovecot monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md
+sidebar_label: "Dovecot"
+-->
-This module provides statistics information from Dovecot server.
+# Dovecot monitoring with Netdata
+
+Provides statistics from a Dovecot server.
Statistics are taken from the dovecot socket by executing the `EXPORT global` command.
More information about dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
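A rough sketch of that data source, assuming the old (v1) stats socket, which answers `EXPORT global` with a tab-separated header line followed by a value line:

```python
import socket

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect('/var/run/dovecot/stats')
sock.sendall(b'EXPORT\tglobal\n')
header, values = sock.recv(65536).decode().splitlines()[:2]
print(dict(zip(header.split('\t'), values.split('\t'))))
sock.close()
```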
@@ -32,8 +38,8 @@ Module gives information with following charts:
5. **Context Switches**
- - volountary
- - involountary
+ - voluntary
+ - involuntary
6. **disk** in bytes/s
@@ -69,7 +75,15 @@ Module gives information with following charts:
- hit
- miss
-## configuration
+## Configuration
+
+Edit the `python.d/dovecot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/dovecot.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index be1fa53d5..dfaef28b5 100644
--- a/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -5,10 +5,8 @@
from bases.FrameworkServices.SocketService import SocketService
-
UNIX_SOCKET = '/var/run/dovecot/stats'
-
ORDER = [
'sessions',
'logins',
@@ -51,7 +49,8 @@ CHARTS = {
]
},
'context_switches': {
- 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
+ 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
index 211dfabfa..d8d7581bc 100644
--- a/collectors/python.d.plugin/elasticsearch/README.md
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -1,6 +1,12 @@
-# elasticsearch
+<!--
+title: "Elasticsearch monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/elasticsearch/README.md
+sidebar_label: "Elasticsearch"
+-->
-This module monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
+# Elasticsearch monitoring with Netdata
+
+Monitors [Elasticsearch](https://www.elastic.co/products/elasticsearch) performance and health metrics.
It produces:
@@ -16,7 +22,7 @@ It produces:
- Time spent on indexing, refreshing, flushing
- Indexing and flushing latency
-3. **Memory usage and garbace collection** charts:
+3. **Memory usage and garbage collection** charts:
- JVM heap currently in use, committed
- Count of garbage collections
@@ -58,7 +64,15 @@ It produces:
- Num of replicas
- Health status
-## configuration
+## Configuration
+
+Edit the `python.d/elasticsearch.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/elasticsearch.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 8aaa08583..dddf50b4c 100644
--- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -245,7 +245,7 @@ CHARTS = {
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
- ['indices_translog_uncommitted_operations', 'uncommited', 'absolute']
+ ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
]
},
'index_translog_size': {
@@ -253,7 +253,7 @@ CHARTS = {
'elastic.index_translog_size', 'area'],
'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567],
- ['indices_translog_uncommitted_size_in_bytes', 'uncommited', 'absolute', 1, 1048567]
+ ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048567]
]
},
'index_segments_count': {
@@ -295,7 +295,7 @@ CHARTS = {
'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
- ['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
+ ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
]
},
@@ -513,6 +513,8 @@ def convert_index_store_size_to_bytes(size):
return round(float(size[:-2]) * 1024 * 1024)
elif size.endswith('gb'):
return round(float(size[:-2]) * 1024 * 1024 * 1024)
+ elif size.endswith('tb'):
+ return round(float(size[:-2]) * 1024 * 1024 * 1024 * 1024)
elif size.endswith('b'):
return round(float(size[:-1]))
return -1
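The conversion the patched function performs can be restated compactly; a sketch (not the module's code) covering the same suffixes, including the newly added `tb` case:

```python
def to_bytes(size):
    # check the two-letter suffixes before the bare 'b' fallback
    for suffix, factor in (('kb', 1024), ('mb', 1024 ** 2), ('gb', 1024 ** 3), ('tb', 1024 ** 4)):
        if size.endswith(suffix):
            return round(float(size[:-2]) * factor)
    if size.endswith('b'):
        return round(float(size[:-1]))
    return -1

assert to_bytes('1.5tb') == round(1.5 * 1024 ** 4)
```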
diff --git a/collectors/python.d.plugin/energid/README.md b/collectors/python.d.plugin/energid/README.md
index fc5101590..60c829fed 100644
--- a/collectors/python.d.plugin/energid/README.md
+++ b/collectors/python.d.plugin/energid/README.md
@@ -1,9 +1,15 @@
-# energid
+<!--
+title: "Energi Core node monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/energid/README.md
+sidebar_label: "Energi Core"
+-->
-A collector for [Energi Core](https://github.com/energicryptocurrency/energi)
-node instance monitoring.
+# Energi Core node monitoring with Netdata
-As Energi Core Gen 1 & 2 are based on the original Bitcoin code and
+Monitors blockchain, memory, network and unspent transaction statistics.
+
+As [Energi Core](https://github.com/energicryptocurrency/energi) Gen 1 & 2 are based on the original Bitcoin code and
support a very similar JSON RPC interface, there is a good chance the module works
with many other forks, including bitcoind itself.
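Since the daemon speaks plain JSON-RPC over HTTP, the module's requests are easy to reproduce; a minimal sketch with assumed endpoint, port, and method (real deployments also need the `rpcuser`/`rpcpassword` credentials from the daemon's configuration):

```python
import json
from urllib.request import Request, urlopen

# endpoint and method below are assumptions for illustration
payload = json.dumps({'version': '1.1', 'method': 'getblockchaininfo', 'params': [], 'id': 1})
req = Request('http://127.0.0.1:9796', payload.encode(), {'Content-Type': 'application/json'})
print(json.loads(urlopen(req).read().decode())['result'])
```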
@@ -42,6 +48,14 @@ long daemon startup.
## Configuration
+Edit the `python.d/energid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/energid.conf
+```
+
Sample:
```yaml
diff --git a/collectors/python.d.plugin/energid/energid.chart.py b/collectors/python.d.plugin/energid/energid.chart.py
index b8aa89e50..079c32dc8 100644
--- a/collectors/python.d.plugin/energid/energid.chart.py
+++ b/collectors/python.d.plugin/energid/energid.chart.py
@@ -41,9 +41,9 @@ CHARTS = {
'mempool': {
'options': [None, 'MemPool', 'MiB', 'memory', 'energid.mempool', 'area'],
'lines': [
- ['mempool_max', 'Max', 'absolute', None, 1024*1024],
- ['mempool_current', 'Usage', 'absolute', None, 1024*1024],
- ['mempool_txsize', 'TX Size', 'absolute', None, 1024*1024],
+ ['mempool_max', 'Max', 'absolute', None, 1024 * 1024],
+ ['mempool_current', 'Usage', 'absolute', None, 1024 * 1024],
+ ['mempool_txsize', 'TX Size', 'absolute', None, 1024 * 1024],
],
},
'secmem': {
@@ -93,22 +93,23 @@ METHODS = {
'mempool_max': r['maxmempool'],
},
'getmemoryinfo': lambda r: dict([
- ('secmem_' + k, v) for (k,v) in r['locked'].items()
+ ('secmem_' + k, v) for (k, v) in r['locked'].items()
]),
'getnetworkinfo': lambda r: {
- 'network_timeoffset' : r['timeoffset'],
+ 'network_timeoffset': r['timeoffset'],
'network_connections': r['connections'],
},
'gettxoutsetinfo': lambda r: {
- 'utxo_count' : r['txouts'],
- 'utxo_xfers' : r['transactions'],
- 'utxo_size' : r['disk_size'],
- 'utxo_amount' : r['total_amount'],
+ 'utxo_count': r['txouts'],
+ 'utxo_xfers': r['transactions'],
+ 'utxo_size': r['disk_size'],
+ 'utxo_amount': r['total_amount'],
},
}
JSON_RPC_VERSION = '1.1'
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index 699ebe69c..561ea62ed 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -1,4 +1,9 @@
-# example
+<!--
+title: "Example"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md
+-->
+
+# Example
An example python data collection module.
You can use this example to help you [write a new Python module](../#how-to-write-a-new-module).
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index cc8c18759..61ae47f22 100644
--- a/collectors/python.d.plugin/example/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -7,7 +7,6 @@ from random import SystemRandom
from bases.FrameworkServices.SimpleService import SimpleService
-
priority = 90000
ORDER = [
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index 985bd6e36..240aa7bed 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -1,8 +1,33 @@
-# exim
+<!--
+title: "Exim monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md
+sidebar_label: "Exim"
+-->
+
+# Exim monitoring with Netdata
Simple module executing `exim -bpc` to grab exim queue.
This command can take a lot of time to finish its execution, so it is not recommended to run it every second.
+## Requirements
+
+The module uses the `exim` binary, which can only be executed as root by default, so other users must be allowed to run it. To do that, add the `queue_list_requires_admin` statement to the exim configuration and set it to `false` (it defaults to `true`). On many Linux distributions, the exim configuration is located at `/etc/exim.conf`.
+
+1. Edit the `exim` configuration with your preferred editor and add:
+`queue_list_requires_admin = false`
+2. Restart `exim` and Netdata
+
+*WHM (CPanel) server*
+
+On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
+
+1. Log in to WHM.
+2. Navigate to Service Configuration --> Exim Configuration Manager --> Advanced Editor tab.
+3. Scroll down to the **Add additional configuration setting** button and click it.
+4. In the new dropdown that appears above, find and choose `queue_list_requires_admin` and set it to `false`.
+5. Scroll to the end and click the **Save** button.
+
It produces only one chart:
1. **Exim Queue Emails**
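The collection itself amounts to reading a single number; a hedged equivalent of what the module executes:

```python
from subprocess import check_output

# `exim -bpc` prints only the count of messages currently in the queue
queued = int(check_output(['exim', '-bpc']).strip())
print('emails in queue:', queued)
```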
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 68b7b5cfb..7238a1bea 100644
--- a/collectors/python.d.plugin/exim/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.ExecutableService import ExecutableService
-
EXIM_COMMAND = 'exim -bpc'
ORDER = [
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index 1ab0f6f63..c1ad994a5 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -1,14 +1,28 @@
-# fail2ban
+<!--
+title: "Fail2ban monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md
+sidebar_label: "Fail2ban"
+-->
-Module monitor fail2ban log file to show all bans for all active jails
+# Fail2ban monitoring with Netdata
-**Requirements:**
+Monitors the fail2ban log file to show all bans for all active jails.
+
+## Requirements
- fail2ban.log file MUST BE readable by Netdata (a good idea is to add **create 0640 root netdata** to the fail2ban configuration in logrotate.d)
It produces one chart with multiple lines (one line per jail).
-## configuration
+## Configuration
+
+Edit the `python.d/fail2ban.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/fail2ban.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
index 9f5f2dcc4..99dbf79dd 100644
--- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -3,15 +3,13 @@
# Author: ilyam8
# SPDX-License-Identifier: GPL-3.0-or-later
-import re
import os
-
+import re
from collections import defaultdict
from glob import glob
from bases.FrameworkServices.LogService import LogService
-
ORDER = [
'jails_bans',
'jails_in_jail',
@@ -25,13 +23,13 @@ def charts(jails):
ch = {
ORDER[0]: {
- 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
- 'lines': []
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
},
ORDER[1]: {
- 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
- 'jail.in_jail', 'line'],
- 'lines': []
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
},
}
for jail in jails:
@@ -52,7 +50,7 @@ def charts(jails):
return ch
-RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')
# Example:
# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
@@ -198,9 +196,9 @@ class Service(LogService):
if name in exclude:
continue
- if status == 'true' and name not in active_jails:
+ if status in ('true', 'yes') and name not in active_jails:
active_jails.append(name)
- elif status == 'false' and name in active_jails:
+ elif status in ('false', 'no') and name in active_jails:
active_jails.remove(name)
return active_jails or DEFAULT_JAILS
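The widened `RE_JAILS` pattern now accepts `yes`/`no` as well as `true`/`false`; a quick check against a hypothetical jail configuration:

```python
import re

RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')

sample = """
[sshd]
port    = ssh
enabled = yes

[nginx-http-auth]
enabled = false
"""
print(RE_JAILS.findall(sample))  # [('sshd', 'yes'), ('nginx-http-auth', 'false')]
```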
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
index 3a2cdf9b4..2993c8952 100644
--- a/collectors/python.d.plugin/freeradius/README.md
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -1,4 +1,10 @@
-# freeradius
+<!--
+title: "FreeRADIUS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/freeradius/README.md
+sidebar_label: "FreeRADIUS"
+-->
+
+# FreeRADIUS monitoring with Netdata
Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
@@ -44,7 +50,15 @@ It produces:
- proxy-acct-malformed-requests
- proxy-acct-unknown-typesa
-## configuration
+## Configuration
+
+Edit the `python.d/freeradius.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/freeradius.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 9022d5e60..161d57e07 100644
--- a/collectors/python.d.plugin/freeradius/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -6,8 +6,8 @@
import re
from subprocess import Popen, PIPE
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
update_every = 15
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
index cbb4da3e2..b9fc914bf 100644
--- a/collectors/python.d.plugin/gearman/README.md
+++ b/collectors/python.d.plugin/gearman/README.md
@@ -1,8 +1,12 @@
-# Gearman
+<!--
+title: "Gearman monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md
+sidebar_label: "Gearman"
+-->
-Module monitors Gearman worker statistics. A chart
-is shown for each job as well as one showing a summary
-of all workers.
+# Gearman monitoring with Netdata
+
+Monitors Gearman worker statistics. A chart is shown for each job as well as one showing a summary of all workers.
Note: Charts may show as a line graph rather than an area
graph if you load Netdata with no jobs running. To change
@@ -20,7 +24,15 @@ It produces:
* Workers idle
* Workers running
-### configuration
+## Configuration
+
+Edit the `python.d/gearman.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/gearman.conf
+```
```yaml
localhost:
@@ -36,4 +48,4 @@ localhost:
When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:4730`.
----
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgearman%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/gearman/gearman.chart.py b/collectors/python.d.plugin/gearman/gearman.chart.py
index 26f3533c4..5e280a4d8 100644
--- a/collectors/python.d.plugin/gearman/gearman.chart.py
+++ b/collectors/python.d.plugin/gearman/gearman.chart.py
@@ -4,9 +4,9 @@
# Gearman Netdata Plugin
-from bases.FrameworkServices.SocketService import SocketService
from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
CHARTS = {
'total_workers': {
@@ -29,6 +29,7 @@ def job_chart_template(job_name):
]
}
+
def build_result_dict(job):
"""
Get the status for each job
@@ -46,6 +47,7 @@ def build_result_dict(job):
'{0}_running'.format(job['job_name']): running,
}
+
def parse_worker_data(job):
job_name = job[0]
job_metrics = job[1:]
@@ -119,6 +121,7 @@ class Service(SocketService):
Example output returned from
_get_raw_data():
+ prefix generic_worker4 78 78 500
generic_worker2 78 78 500
generic_worker3 0 0 760
generic_worker1 0 0 500
@@ -135,13 +138,24 @@ class Service(SocketService):
self.debug("Gearman returned no data")
raise GearmanReadException()
- job_lines = raw.splitlines()[:-1]
- job_lines = [job.split() for job in sorted(job_lines)]
+ workers = list()
+
+ for line in raw.splitlines()[:-1]:
+ parts = line.split()
+ if not parts:
+ continue
+
+ name = '_'.join(parts[:-3])
+ try:
+ values = [int(w) for w in parts[-3:]]
+ except ValueError:
+ continue
- for line in job_lines:
- line[1:] = map(int, line[1:])
+ w = [name]
+ w.extend(values)
+ workers.append(w)
- return job_lines
+ return workers
def process_jobs(self, active_jobs):
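The rewritten parsing treats the last three whitespace-separated fields as the numeric columns of the `status` output and joins everything before them into the job name, so names containing spaces no longer break the split; a worked example using the docstring's sample line:

```python
line = 'prefix generic_worker4 78 78 500'
parts = line.split()
name = '_'.join(parts[:-3])             # 'prefix_generic_worker4'
numbers = [int(v) for v in parts[-3:]]  # [78, 78, 500]
print([name] + numbers)
```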
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index 7d78fabd0..a73610e7a 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -1,11 +1,14 @@
-# go_expvar
+<!--
+title: "Go applications monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md
+sidebar_label: "Go applications"
+-->
-The `go_expvar` module can monitor any Go application that exposes its metrics with the use of
-`expvar` package from the Go standard library.
+# Go applications monitoring with Netdata
-`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+Monitors any Go application that exposes its metrics with the `expvar` package from the Go standard library. The module produces charts for Go runtime memory statistics and, optionally, any number of custom charts.
-For the memory statistics, it produces the following charts:
+The `go_expvar` module produces the following charts:
1. **Heap allocations** in kB
@@ -36,7 +39,7 @@ For the memory statistics, it produces the following charts:
- avg: average duration of all GC stop-the-world pauses
-## Monitoring Go Applications
+## Monitoring Go applications
Netdata can be used to monitor running Go applications that expose their metrics with
the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library.
@@ -66,7 +69,7 @@ Sample output:
```json
{
"cmdline": ["./expvar-demo-binary"],
-"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
}
```
@@ -112,9 +115,8 @@ the use of `netdata`s `go_expvar` module.
### Using Netdata go_expvar module
-The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
-(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
-variable to `yes`:
+The `go_expvar` module is disabled by default. To enable it, edit `python.d.conf` (to edit it on your system run
+`/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`:
```
# Enable / Disable python.d.plugin modules
@@ -130,10 +132,9 @@ go_expvar: yes
...
```
-Next, we need to edit the module configuration file (found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default)
-(to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`).
-The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
-Let's see a sample job configuration:
+Next, we need to edit the module configuration file (found at `/etc/netdata/python.d/go_expvar.conf` by default) (to
+edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of
+jobs, where each job can be used to monitor a separate Go application. Let's see a sample job configuration:
```
# /etc/netdata/python.d/go_expvar.conf
@@ -208,8 +209,8 @@ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-28449
Please see these two links to the official Netdata documentation for more information about the values:
-- [External plugins - charts](../../plugins.d/#chart)
-- [Chart variables](../#global-variables-order-and-chart)
+- [External plugins - charts](/collectors/plugins.d/README.md#chart)
+- [Chart variables](/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
**Line definitions**
@@ -232,7 +233,7 @@ hidden: False
```
Please see the following link for more information about the options and their default values:
-[External plugins - dimensions](../../plugins.d/#dimension)
+[External plugins - dimensions](/collectors/plugins.d/README.md#dimension)
Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
All dicts in the resulting JSON document are then flattened to one level.
@@ -251,7 +252,28 @@ In the above case, the exported variables will be available under `runtime.gorou
`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
the first defined key wins and all subsequent keys with the same name are ignored.
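A sketch of that flattening, assuming dot-joined keys and the first-writer-wins collision handling described above:

```python
def flatten(obj, prefix='', into=None):
    into = {} if into is None else into
    for key, value in obj.items():
        path = prefix + key
        if isinstance(value, dict):
            flatten(value, path + '.', into)
        elif path not in into:  # on collision, the first defined key wins
            into[path] = value
    return into

print(flatten({'runtime': {'goroutines': 5}, 'counters': {'cnt1': 1, 'cnt2': 2}}))
# {'runtime.goroutines': 5, 'counters.cnt1': 1, 'counters.cnt2': 2}
```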
-**Configuration example**
+## Enable the collector
+
+The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `go_expvar` collector.
+
+## Configuration
+
+Edit the `python.d/go_expvar.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/go_expvar.conf
+```
The configuration below matches the second Go application described above.
Netdata will monitor and chart memory stats for the application, as well as a custom chart of
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index e82a87761..f9bbdc164 100644
--- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -4,13 +4,12 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import division
-import json
+import json
from collections import namedtuple
from bases.FrameworkServices.UrlService import UrlService
-
MEMSTATS_ORDER = [
'memstats_heap',
'memstats_stack',
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index c4bb0447e..33d34f1ad 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -1,6 +1,12 @@
-# haproxy
+<!--
+title: "HAProxy monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md
+sidebar_label: "HAProxy"
+-->
-Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
+# HAProxy monitoring with Netdata
+
+Monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
It also collects health metrics, such as backend server status (server check should be used).
The plugin can obtain data from a URL **OR** a UNIX socket.
@@ -28,7 +34,15 @@ It produces:
- number of failed servers for every backend (in DOWN state)
-## configuration
+## Configuration
+
+Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/haproxy.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index 8df712943..6f94c9a07 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -179,7 +179,6 @@ CHARTS = {
}
}
-
METRICS = {
'bin': {'algorithm': 'incremental', 'divisor': 1024},
'bout': {'algorithm': 'incremental', 'divisor': 1024},
@@ -193,7 +192,6 @@ METRICS = {
'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
}
-
BACKEND_METRICS = {
'qtime': {'algorithm': 'absolute', 'divisor': 1},
'ctime': {'algorithm': 'absolute', 'divisor': 1},
@@ -201,7 +199,6 @@ BACKEND_METRICS = {
'ttime': {'algorithm': 'absolute', 'divisor': 1}
}
-
REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
@@ -309,7 +306,7 @@ class Service(UrlService, SocketService):
name, METRICS[metric]['algorithm'], 1,
METRICS[metric]['divisor']])
self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
+ name, 'incremental', 1, 1])
for back in self.data['backend']:
name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
for metric in METRICS:
@@ -317,7 +314,7 @@ class Service(UrlService, SocketService):
name, METRICS[metric]['algorithm'], 1,
METRICS[metric]['divisor']])
self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
+ name, 'incremental', 1, 1])
for metric in BACKEND_METRICS:
self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
name, BACKEND_METRICS[metric]['algorithm'], 1,
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index 03474c893..aaaf21421 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -1,13 +1,27 @@
-# hddtemp
+<!--
+title: "Hard drive temperature monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md
+sidebar_label: "Hard drive temperature"
+-->
-Module monitors disk temperatures from one or more hddtemp daemons.
+# Hard drive temperature monitoring with Netdata
+
+Monitors disk temperatures from one or more `hddtemp` daemons.
**Requirement:**
Running `hddtemp` in daemonized mode with access on a TCP port
It produces one chart **Temperature** with a dynamic number of dimensions (one per disk)
-## configuration
+## Configuration
+
+Edit the `python.d/hddtemp.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hddtemp.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
index b5aaaeb39..6427aa180 100644
--- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -6,12 +6,10 @@
import re
-
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'temperatures',
]
@@ -30,7 +28,7 @@ class Disk:
def __init__(self, id_, name, temp):
self.id = id_.split('/')[-1]
self.name = name.replace(' ', '_')
- self.temp = temp if temp.isdigit() else 0
+ self.temp = temp if temp.isdigit() else None
def __repr__(self):
return self.id
diff --git a/collectors/charts.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/hpssa/Makefile.inc
index ad470d88c..1c04aa49c 100644
--- a/collectors/charts.d.plugin/squid/Makefile.inc
+++ b/collectors/python.d.plugin/hpssa/Makefile.inc
@@ -5,9 +5,9 @@
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
-dist_charts_DATA += squid/squid.chart.sh
-dist_chartsconfig_DATA += squid/squid.conf
+dist_python_DATA += hpssa/hpssa.chart.py
+dist_pythonconfig_DATA += hpssa/hpssa.conf
# do not install these files, but include them in the distribution
-dist_noinst_DATA += squid/README.md squid/Makefile.inc
+dist_noinst_DATA += hpssa/README.md hpssa/Makefile.inc
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
new file mode 100644
index 000000000..2079ff2ad
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "HP Smart Storage Arrays monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md
+sidebar_label: "HP Smart Storage Arrays"
+-->
+
+# HP Smart Storage Arrays monitoring with Netdata
+
+Monitors controller, cache module, logical and physical drive state and temperature using the `ssacli` tool.
+
+## Requirements
+
+This module uses `ssacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `ssacli` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/ssacli
+```
+
+To collect metrics, the module executes: `sudo -n ssacli ctrl all show config detail`
+
+This module produces:
+
+1. Controller state and temperature
+2. Cache module state and temperature
+3. Logical drive state
+4. Physical drive state and temperature
+
+## Enable the collector
+
+The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `hpssa` collector.
+
+## Configuration
+
+Edit the `python.d/hpssa.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/hpssa.conf
+```
+
+If `ssacli` cannot be found in the `PATH`, configure its location in `hpssa.conf`.
+
+```yaml
+ssacli_path: /usr/sbin/ssacli
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhpssa%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hpssa/hpssa.chart.py b/collectors/python.d.plugin/hpssa/hpssa.chart.py
new file mode 100644
index 000000000..ce1b43009
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.chart.py
@@ -0,0 +1,395 @@
+# -*- coding: utf-8 -*-
+# Description: hpssa netdata python.d module
+# Author: Peter Gnodde (gnoddep)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+update_every = 5
+
+ORDER = [
+ 'ctrl_status',
+ 'ctrl_temperature',
+ 'ld_status',
+ 'pd_status',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ctrl_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Controller',
+ 'hpssa.ctrl_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ctrl_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Controller',
+ 'hpssa.ctrl_temperature',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ld_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Logical drives',
+ 'hpssa.ld_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Physical drives',
+ 'hpssa.pd_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Physical drives',
+ 'hpssa.pd_temperature',
+ 'line'
+ ],
+ 'lines': []
+ }
+}
+
+adapter_regex = re.compile(r'^(?P<adapter_type>.+) in Slot (?P<slot>\d+)')
+ignored_sections_regex = re.compile(
+ r'''
+ ^
+ Physical[ ]Drives
+ | None[ ]attached
+ | (?:Expander|Enclosure|SEP|Port[ ]Name:)[ ].+
+ | .+[ ]at[ ]Port[ ]\S+,[ ]Box[ ]\d+,[ ].+
+ | Mirror[ ]Group[ ]\d+:
+ $
+ ''',
+ re.X
+)
+mirror_group_regex = re.compile(r'^Mirror Group \d+:$')
+array_regex = re.compile(r'^Array: (?P<id>[A-Z]+)$')
+drive_regex = re.compile(
+ r'''
+ ^
+ Logical[ ]Drive:[ ](?P<logical_drive_id>\d+)
+ | physicaldrive[ ](?P<fqn>[^:]+:\d+:\d+)
+ $
+ ''',
+ re.X
+)
+key_value_regex = re.compile(r'^(?P<key>[^:]+): ?(?P<value>.*)$')
+ld_status_regex = re.compile(r'^Status: (?P<status>[^,]+)(?:, (?P<percentage>[0-9.]+)% complete)?$')
+error_match = re.compile(r'Error:')
+
+
+class HPSSAException(Exception):
+ pass
+
+
+class HPSSA(object):
+ def __init__(self, lines):
+ self.lines = [line.strip() for line in lines if line.strip()]
+ self.current_line = 0
+ self.adapters = []
+ self.parse()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.current_line == len(self.lines):
+ raise StopIteration
+
+ line = self.lines[self.current_line]
+ self.current_line += 1
+
+ return line
+
+ def next(self):
+ """
+ This is for Python 2.7 compatibility
+ """
+ return self.__next__()
+
+ def rewind(self):
+ self.current_line = max(self.current_line - 1, 0)
+
+ @staticmethod
+ def match_any(line, *regexes):
+ return any([regex.match(line) for regex in regexes])
+
+ def parse(self):
+ for line in self:
+ match = adapter_regex.match(line)
+ if match:
+ self.adapters.append(self.parse_adapter(**match.groupdict()))
+
+ def parse_adapter(self, slot, adapter_type):
+ adapter = {
+ 'slot': int(slot),
+ 'type': adapter_type,
+
+ 'controller': {
+ 'status': None,
+ 'temperature': None,
+ },
+ 'cache': {
+ 'present': False,
+ 'status': None,
+ 'temperature': None,
+ },
+ 'battery': {
+ 'status': None,
+ 'count': 0,
+ },
+
+ 'logical_drives': [],
+ 'physical_drives': [],
+ }
+
+ for line in self:
+ if error_match.match(line):
+ raise HPSSAException('Error: {}'.format(line))
+ elif adapter_regex.match(line):
+ self.rewind()
+ break
+ elif array_regex.match(line):
+ self.parse_array(adapter)
+ elif line == 'Unassigned' or line == 'HBA Drives':
+ self.parse_physical_drives(adapter)
+ elif ignored_sections_regex.match(line):
+ self.parse_ignored_section()
+ else:
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Controller Status':
+ adapter['controller']['status'] = value == 'OK'
+ elif key == 'Controller Temperature (C)':
+ adapter['controller']['temperature'] = int(value)
+ elif key == 'Cache Board Present':
+ adapter['cache']['present'] = value == 'True'
+ elif key == 'Cache Status':
+ adapter['cache']['status'] = value == 'OK'
+ elif key == 'Cache Module Temperature (C)':
+ adapter['cache']['temperature'] = int(value)
+ elif key == 'Battery/Capacitor Count':
+ adapter['battery']['count'] = int(value)
+ elif key == 'Battery/Capacitor Status':
+ adapter['battery']['status'] = value == 'OK'
+ else:
+ raise HPSSAException('Cannot parse line: {}'.format(line))
+
+ return adapter
+
+ def parse_array(self, adapter):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = drive_regex.match(line)
+ if match:
+ data = match.groupdict()
+ if data['logical_drive_id']:
+ self.parse_logical_drive(adapter, int(data['logical_drive_id']))
+ else:
+ self.parse_physical_drive(adapter, data['fqn'])
+ elif not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ def parse_physical_drives(self, adapter):
+ for line in self:
+ match = drive_regex.match(line)
+ if match:
+ self.parse_physical_drive(adapter, match.group('fqn'))
+ else:
+ self.rewind()
+ break
+
+ def parse_logical_drive(self, adapter, logical_drive_id):
+ ld = {
+ 'id': logical_drive_id,
+ 'status': None,
+ 'status_complete': None,
+ }
+
+ for line in self:
+ if mirror_group_regex.match(line):
+ self.parse_ignored_section()
+ continue
+
+ match = ld_status_regex.match(line)
+ if match:
+ ld['status'] = match.group('status') == 'OK'
+
+ if match.group('percentage'):
+ ld['status_complete'] = float(match.group('percentage')) / 100
+ elif HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ adapter['logical_drives'].append(ld)
+
+ def parse_physical_drive(self, adapter, fqn):
+ pd = {
+ 'fqn': fqn,
+ 'status': None,
+ 'temperature': None,
+ }
+
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Status':
+ pd['status'] = value == 'OK'
+ elif key == 'Current Temperature (C)':
+ pd['temperature'] = int(value)
+ else:
+ self.rewind()
+ break
+
+ adapter['physical_drives'].append(pd)
+
+ def parse_ignored_section(self):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.ssacli_path = self.configuration.get('ssacli_path', 'ssacli')
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.cmd = []
+
+ def get_adapters(self):
+ try:
+ adapters = HPSSA(self._get_raw_data(command=self.cmd)).adapters
+ if not adapters:
+ # If no adapters are returned, run the command again but capture stderr
+ err = self._get_raw_data(command=self.cmd, stderr=True)
+ if err:
+ raise HPSSAException('Error executing cmd {}: {}'.format(' '.join(self.cmd), '\n'.join(err)))
+ return adapters
+ except HPSSAException as ex:
+ self.error(ex)
+ return []
+
+ def check(self):
+ if not os.path.isfile(self.ssacli_path):
+ ssacli_path = find_binary(self.ssacli_path)
+ if ssacli_path:
+ self.ssacli_path = ssacli_path
+ else:
+ self.error('Cannot locate "{}" binary'.format(self.ssacli_path))
+ return False
+
+ if self.use_sudo:
+ sudo = find_binary('sudo')
+ if not sudo:
+ self.error('Cannot locate "{}" binary'.format('sudo'))
+ return False
+
+ allowed = self._get_raw_data(command=[sudo, '-n', '-l', self.ssacli_path])
+ if not allowed or allowed[0].strip() != os.path.realpath(self.ssacli_path):
+ self.error('Not allowed to run sudo for command {}'.format(self.ssacli_path))
+ return False
+
+ self.cmd = [sudo, '-n']
+
+ self.cmd.extend([self.ssacli_path, 'ctrl', 'all', 'show', 'config', 'detail'])
+ self.info('Command: {}'.format(self.cmd))
+
+ adapters = self.get_adapters()
+
+ self.info('Discovered adapters: {}'.format([adapter['type'] for adapter in adapters]))
+ if not adapters:
+ self.error('No adapters discovered')
+ return False
+
+ return True
+
+ def get_data(self):
+ netdata = {}
+
+ for adapter in self.get_adapters():
+ status_key = '{}_status'.format(adapter['slot'])
+ temperature_key = '{}_temperature'.format(adapter['slot'])
+ ld_key = 'ld_{}_'.format(adapter['slot'])
+
+ data = {
+ 'ctrl_status': {
+ 'ctrl_' + status_key: adapter['controller']['status'],
+ 'cache_' + status_key: adapter['cache']['present'] and adapter['cache']['status'],
+ 'battery_' + status_key:
+ adapter['battery']['status'] if adapter['battery']['count'] > 0 else None
+ },
+
+ 'ctrl_temperature': {
+ 'ctrl_' + temperature_key: adapter['controller']['temperature'],
+ 'cache_' + temperature_key: adapter['cache']['temperature'],
+ },
+
+ 'ld_status': {
+ ld_key + '{}_status'.format(ld['id']): ld['status'] for ld in adapter['logical_drives']
+ },
+
+ 'pd_status': {},
+ 'pd_temperature': {},
+ }
+
+ for pd in adapter['physical_drives']:
+ pd_key = 'pd_{}_{}'.format(adapter['slot'], pd['fqn'])
+ data['pd_status'][pd_key + '_status'] = pd['status']
+ data['pd_temperature'][pd_key + '_temperature'] = pd['temperature']
+
+ for chart, dimension_data in data.items():
+ for dimension_id, value in dimension_data.items():
+ if value is None:
+ continue
+
+ if dimension_id not in self.charts[chart]:
+ self.charts[chart].add_dimension([dimension_id])
+
+ netdata[dimension_id] = value
+
+ return netdata
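The `HPSSA` parser above takes plain lines, so it can be exercised against captured output without running `ssacli`; a quick sketch (the file path is hypothetical):

```python
with open('/tmp/ssacli-output.txt') as f:  # captured `ssacli ctrl all show config detail` output
    report = HPSSA(f.readlines())

for adapter in report.adapters:
    state = 'OK' if adapter['controller']['status'] else 'NOT OK'
    print('{} in slot {}: controller {}'.format(adapter['type'], adapter['slot'], state))
```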
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/hpssa/hpssa.conf
index 68561366b..cc50c9836 100644
--- a/collectors/python.d.plugin/unbound/unbound.conf
+++ b/collectors/python.d.plugin/hpssa/hpssa.conf
@@ -1,17 +1,9 @@
-# netdata python.d.plugin configuration for unbound
+# netdata python.d.plugin configuration for hpssa
#
# This file is in YaML format. Generally the format is:
#
# name: value
#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
# ----------------------------------------------------------------------
# Global Variables
@@ -20,7 +12,7 @@
# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
-# update_every: 1
+# update_every: 5
# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
@@ -54,32 +46,16 @@
# name: myname # the JOB's name as it will appear at the
# # dashboard (by default is the job_name)
# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
+# update_every: 5 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
-# Additionally to the above, unbound also supports the following:
-#
-# host: localhost # The host to connect to.
-# port: 8953 # WHat port to use (defaults to 8953)
-# socket: /path/to/socket # A path to a UNIX socket to use instead
-# # of a TCP connection
-# tls_key_file: /path/to/key # The key file to use for authentication
-# tls_cert_file: /path/to/key # The certificate to use for authentication
-# extended: false # Whether to collect extended stats or not
-# per_thread: false # Whether to show charts for per-thread stats
+# Additionally to the above, hpssa also supports the following:
#
-# In addition to the above, you can set the following to try and
-# auto-detect most settings based on the unbound configuration:
-#
-# ubconf: /etc/unbound/unbound.conf
-#
-# Note that the SSL key and certificate need to be readable by the user
-# unbound runs as if you're using the regular control interface.
-# If you're using a UNIX socket, that has to be readable by the netdata user.
+# ssacli_path: /usr/sbin/ssacli # The path to the ssacli executable
+# use_sudo: True # Whether to use sudo or not
+# ----------------------------------------------------------------------
-# The following should work for most users if they have unbound configured
-# correctly.
-local:
- ubconf: /etc/unbound/unbound.conf
+# ssacli_path: /usr/sbin/ssacli
+# use_sudo: True
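+#
+# A hypothetical named job might look like this (sample values only):
+#
+# my_hpssa:
+#   ssacli_path: /usr/sbin/ssacli
+#   use_sudo: True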
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
index 99b28cfeb..55aad52f0 100644
--- a/collectors/python.d.plugin/httpcheck/README.md
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -1,6 +1,12 @@
-# httpcheck
+<!--
+title: "HTTP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/httpcheck/README.md
+sidebar_label: "HTTP endpoints"
+-->
-Module monitors remote http server for availability and response time.
+# HTTP endpoint monitoring with Netdata
+
+Monitors a remote HTTP server for availability and response time.
Following charts are drawn per job:
@@ -17,7 +23,15 @@ Following charts are drawn per job:
- Connection failed: port not listening or blocked
- Connection timed out: host or port unreachable
-## configuration
+## Configuration
+
+Edit the `python.d/httpcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/httpcheck.conf
+```
Sample configuration and their default values.
@@ -32,7 +46,7 @@ server:
redirect: yes # optional
```
-### notes
+### Notes
- The status chart is primarily intended for alarms, badges or for access via API.
- A system/service/firewall might block Netdata's access if a portscan or
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index fd51370da..75718bb60 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -3,9 +3,10 @@
# Original Author: ccremer (github.com/ccremer)
# SPDX-License-Identifier: GPL-3.0-or-later
-import urllib3
import re
+import urllib3
+
try:
from time import monotonic as time
except ImportError:
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index 1e1dd0205..95adba270 100644
--- a/collectors/python.d.plugin/httpcheck/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -70,9 +70,12 @@ chart_cleanup: 0
# url: 'http[s]://host-ip-or-dns[:port][path]'
# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# header: {'Content-Type': 'application/json'}
+# # [optional] the HTTP header sent with the request.
# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
# # followed (default).
+# body: {'key': 'value'} # [optional] the body sent with the request (e.g. POST, PUT, PATCH).
# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
# # status chart, however: The response time will still be > 0, since the
# # host responded with something.
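+#
+# For example, a hypothetical job POSTing a JSON body (illustrative values only):
+#
+# jsonapi:
+#   url: 'https://example.com/api/health'
+#   method: POST
+#   header: {'Content-Type': 'application/json'}
+#   body: {'key': 'value'}
+#   status_accepted: [200, 204]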
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index eabfee0a6..90cdaa5dc 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -1,8 +1,14 @@
-# icecast
+<!--
+title: "Icecast monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md
+sidebar_label: "Icecast"
+-->
-This module will monitor number of listeners for active sources.
+# Icecast monitoring with Netdata
-**Requirements:**
+Monitors the number of listeners for active sources.
+
+## Requirements
- icecast version >= 2.4.0
@@ -12,7 +18,15 @@ It produces the following charts:
- source number
-## configuration
+## Configuration
+
+Edit the `python.d/icecast.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/icecast.conf
+```
Needs only `url` to server's `/status-json.xsl`
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index e56e506e3..a967d1779 100644
--- a/collectors/python.d.plugin/icecast/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'listeners',
]
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index 639631501..4d3b0ecbe 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -1,26 +1,49 @@
-# ipfs
+<!--
+title: "IPFS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md
+sidebar_label: "IPFS"
+-->
-Module monitors [IPFS](https://ipfs.io) basic information.
+# IPFS monitoring with Netdata
-1. **Bandwidth** in kbits/s
+Collects basic [`IPFS`](https://ipfs.io) information like file system bandwidth, peers, and repo metrics.
- - in
- - out
+## Charts
-2. **Peers**
+It produces the following charts:
- - peers
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- Repo Size in `GiB`
+- Repo Objects in `objects`
-## configuration
+## Configuration
-Only url to IPFS server is needed.
+Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Sample:
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ipfs.conf
+```
+
+---
+
+Calls to the following endpoints are disabled due to `IPFS` bugs:
+
+- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
+- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
+
+They can be enabled in the collector configuration file.
+
+The configuration needs only the `url` of the `IPFS` server; here is an example for two `IPFS` instances:
```yaml
localhost:
- name : 'local'
- url : 'http://localhost:5001'
+ url: 'http://localhost:5001'
+
+remote:
+ url: 'http://203.0.113.10:5001'
```
---
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 8c89b4be1..abfc9c492 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'bandwidth',
'peers',
@@ -64,7 +63,9 @@ class Service(UrlService):
self.order = ORDER
self.definitions = CHARTS
self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.method = "POST"
self.do_pinapi = self.configuration.get('pinapi')
+ self.do_repoapi = self.configuration.get('repoapi')
self.__storage_max = None
def _get_json(self, sub_url):
@@ -89,7 +90,7 @@ class Service(UrlService):
if store_max.endswith('b'):
val, units = store_max[:-2], store_max[-2]
if units in SI_zeroes:
- val += '0'*SI_zeroes[units]
+ val += '0' * SI_zeroes[units]
store_max = val
try:
store_max = int(store_max)
@@ -110,17 +111,33 @@ class Service(UrlService):
# suburl : List of (result-key, original-key, transform-func)
cfg = {
'/api/v0/stats/bw':
- [('in', 'RateIn', int), ('out', 'RateOut', int)],
+ [
+ ('in', 'RateIn', int),
+ ('out', 'RateOut', int),
+ ],
'/api/v0/swarm/peers':
- [('peers', 'Peers', len)],
- '/api/v0/stats/repo':
- [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
+ [
+ ('peers', 'Peers', len),
+ ],
}
+ if self.do_repoapi:
+ cfg.update({
+ '/api/v0/stats/repo':
+ [
+ ('size', 'RepoSize', int),
+ ('objects', 'NumObjects', int),
+ ('avail', 'StorageMax', self._storagemax),
+ ],
+ })
+
if self.do_pinapi:
- cfg.update({
- '/api/v0/pin/ls':
- [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
- })
+ cfg.update({
+ '/api/v0/pin/ls':
+ [
+ ('pinned', 'Keys', len),
+ ('recursive_pins', 'Keys', self._recursive_pins),
+ ]
+ })
r = dict()
for suburl in cfg:
in_json = self._get_json(suburl)
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index c7e186487..8b167b399 100644
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -62,6 +62,10 @@
# Additionally to the above, ipfs also supports the following:
#
# url: 'URL' # URL to the IPFS API
+# repoapi: no # Collect repo metrics
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/7528
+# # resulting in very high CPU Usage
# pinapi: no # Set status of IPFS pinned object polling
# # Currently defaults to disabled due to IPFS Bug
# # https://github.com/ipfs/go-ipfs/issues/3874
@@ -72,6 +76,7 @@
# only one of them will run (they have the same name)
localhost:
- name : 'local'
- url : 'http://localhost:5001'
- pinapi : no
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
index f90cd041e..5830bd63e 100644
--- a/collectors/python.d.plugin/isc_dhcpd/README.md
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -1,11 +1,18 @@
-# isc_dhcpd
+<!--
+title: "ISC DHCP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/isc_dhcpd/README.md
+sidebar_label: "ISC DHCP"
+-->
-Module monitor leases database to show all active leases for given pools.
+# ISC DHCP monitoring with Netdata
-**Requirements:**
+Monitors the leases database to show all active leases for given pools.
+
+## Requirements
- dhcpd leases file MUST BE readable by Netdata
- pools MUST BE in CIDR format
+- `python-ipaddress` package is needed for Python 2
It produces:
@@ -21,17 +28,28 @@ It produces:
- leases (number of active leases in pool)
-## configuration
+## Configuration
+
+Edit the `python.d/isc_dhcpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/isc_dhcpd.conf
+```
Sample:
```yaml
local:
- leases_path : '/var/lib/dhcp/dhcpd.leases'
- pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+ leases_path: '/var/lib/dhcp/dhcpd.leases'
+ pools:
+ office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+ wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+ 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+ wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
```
-In case of python2 you need to install `py2-ipaddress` to make plugin work.
The module will not work If no configuration is given.
---
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index a29439251..099c7d4e9 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -7,9 +7,9 @@ import os
import re
import time
-
try:
import ipaddress
+
HAVE_IP_ADDRESS = True
except ImportError:
HAVE_IP_ADDRESS = False
@@ -19,7 +19,6 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService
-
ORDER = [
'pools_utilization',
'pools_active_leases',
@@ -46,6 +45,19 @@ CHARTS = {
}
}
+POOL_CIDR = "CIDR"
+POOL_IP_RANGE = "IP_RANGE"
+POOL_UNKNOWN = "UNKNOWN"
+
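+# classify a pool definition string by its syntax, e.g.
+# '192.168.4.0/24' -> POOL_CIDR, '192.168.3.10-192.168.3.20' -> POOL_IP_RANGE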
+def detect_ip_type(ip):
+ ip_type = ip.split("-")
+ if len(ip_type) == 1:
+ return POOL_CIDR
+ elif len(ip_type) == 2:
+ return POOL_IP_RANGE
+ else:
+ return POOL_UNKNOWN
+
class DhcpdLeasesFile:
def __init__(self, path):
@@ -87,6 +99,32 @@ class Pool:
def __init__(self, name, network):
self.id = re.sub(r'[:/.-]+', '_', name)
self.name = name
+
+ self.networks = list()
+ for network in network.split(" "):
+ if not network:
+ continue
+
+ ip_type = detect_ip_type(ip=network)
+ if ip_type == POOL_CIDR:
+ self.networks.append(PoolCIDR(network=network))
+ elif ip_type == POOL_IP_RANGE:
+ self.networks.append(PoolIPRange(ip_range=network))
+ else:
+ raise ValueError('Network ({0}) has incorrect syntax; expected CIDR or IP Range format.'.format(network))
+
+ def num_hosts(self):
+ return sum([network.num_hosts() for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item in network:
+ return True
+ return False
+
+
+class PoolCIDR:
+ def __init__(self, network):
self.network = ipaddress.ip_network(address=u'%s' % network)
def num_hosts(self):
@@ -96,6 +134,30 @@ class Pool:
return item.address in self.network
+class PoolIPRange:
+ def __init__(self, ip_range):
+ ip_range = ip_range.split("-")
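+ # represent the range as the minimal list of CIDR networks covering it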
+ self.networks = list(self._summarize_address_range(ip_range[0], ip_range[1]))
+
+ @staticmethod
+ def ip_address(ip):
+ return ipaddress.ip_address(u'%s' % ip)
+
+ def _summarize_address_range(self, first, last):
+ address_first = self.ip_address(first)
+ address_last = self.ip_address(last)
+ return ipaddress.summarize_address_range(address_first, address_last)
+
+ def num_hosts(self):
+ return sum([network.num_addresses for network in self.networks])
+
+ def __contains__(self, item):
+ for network in self.networks:
+ if item.address in network:
+ return True
+ return False
+
+
class Lease:
def __init__(self, address, ends, state):
self.address = ipaddress.ip_address(address=u'%s' % address)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 8dcb5082f..c700947b4 100644
--- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -63,9 +63,10 @@
#
# leases_path: 'PATH' # the path to dhcpd.leases file
# pools:
-# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
-# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
-# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.10-192.168.3.20' # name(dimension): pool in IP Range format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+# wifi-guest: '192.168.5.0/24 192.168.6.10-192.168.6.20' # name(dimension): pool in CIDR + IP Range format
#
#-----------------------------------------------------------------------
# IMPORTANT notes
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index 586973bf0..2225773b7 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -1,6 +1,12 @@
-# litespeed
+<!--
+title: "LiteSpeed monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md
+sidebar_label: "LiteSpeed"
+-->
-Module monitor litespeed web server performance metrics.
+# LiteSpeed monitoring with Netdata
+
+Collects web server performance metrics for network, connection, requests, and cache.
It produces:
@@ -44,7 +50,15 @@ It produces:
- hits
-## configuration
+## Configuration
+
+Edit the `python.d/litespeed.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/litespeed.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
index 4b67ffb8a..7ef8189ea 100644
--- a/collectors/python.d.plugin/litespeed/litespeed.chart.py
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -1,30 +1,28 @@
# -*- coding: utf-8 -*-
# Description: litespeed netdata python.d module
-# Author: Ilya Maschenko (ilyam8)
+# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
import glob
-import re
import os
-
+import re
from collections import namedtuple
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 10
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
- 'net_throughput_http', # net throughput
+ 'net_throughput_http', # net throughput
'net_throughput_https', # net throughput
- 'connections_http', # connections
- 'connections_https', # connections
- 'requests', # requests
- 'requests_processing', # requests
- 'pub_cache_hits', # cache
- 'private_cache_hits', # cache
- 'static_hits', # static
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
]
CHARTS = {
@@ -178,7 +176,7 @@ class Service(SimpleService):
def parse_file(data, lines):
for line in lines:
- if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')):
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
continue
m = dict(RE.findall(line))
for v in T:
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
index 5aa1fa627..3e2d4c190 100644
--- a/collectors/python.d.plugin/logind/README.md
+++ b/collectors/python.d.plugin/logind/README.md
@@ -1,6 +1,12 @@
-# logind
+<!--
+title: "systemd-logind monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/logind/README.md
+sidebar_label: "systemd-logind"
+-->
-This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+# systemd-logind monitoring with Netdata
+
+Monitors active sessions, users, and seats tracked by `systemd-logind` or `elogind`.
It provides the following charts:
@@ -20,9 +26,22 @@ It provides the following charts:
- Seats
-## configuration
+## Enable the collector
+
+The `logind` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `logind` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
+restart netdata`, or the appropriate method for your system, to finish enabling the `logind` collector.
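+
+For example, in `python.d.conf`:
+
+```yaml
+logind: yes
+```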
-This module needs no configuration. Just make sure the `netdata` user
+## Configuration
+
+This module needs no configuration. Just make sure the `netdata` user
can run the `loginctl` command and get a session list without having to
specify a path.
@@ -35,7 +54,15 @@ specify it using the `command` key like so:
command: '/path/to/other/command'
```
-## notes
+Edit the `python.d/logind.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/logind.conf
+```
+
+## Notes
- This module's ability to track logins is dependent on what PAM services
are configured to register sessions with logind. In particular, for
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index afc8cbda6..400a45973 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -1,12 +1,25 @@
-# megacli
+<!--
+title: "MegaRAID controller monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md
+sidebar_label: "MegaRAID controllers"
+-->
-Module collects adapter, physical drives and battery stats.
+# MegaRAID controller monitoring with Netdata
-**Requirements:**
+Collects adapter, physical drive, and battery stats.
+
+## Requirements
+
+Uses the `megacli` program, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+```
+netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
-- `megacli` program
-- `sudo` program
-- `netdata` user needs to be able to be able to sudo the `megacli` program without password
To grab stats it executes:
@@ -25,19 +38,9 @@ It produces:
5. **Battery Cycle Count**
-## prerequisite
-This module uses `megacli` which can only be executed by root. It uses
-`sudo` and assumes that it is configured such that the `netdata` user can
-execute `megacli` as root without password.
-
-Add to `sudoers`:
-
-```
-netdata ALL=(root) NOPASSWD: /path/to/megacli
-```
-### configuration
+## Configuration
**megacli** is disabled by default. Should be explicitly enabled in `python.d.conf`.
@@ -45,7 +48,15 @@ netdata ALL=(root) NOPASSWD: /path/to/megacli
megacli: yes
```
-Battery stats disabled by default. To enable them modify `megacli.conf`.
+Edit the `python.d/megacli.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/megacli.conf
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`.
```yaml
do_battery: yes
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index 4872eab80..ef35ff63f 100644
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -9,7 +9,6 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
from bases.collection import find_binary
-
disabled_by_default = True
update_every = 5
@@ -27,7 +26,7 @@ def adapter_charts(ads):
'adapter_degraded': {
'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
'lines': dims(ads)
- },
+ },
}
return order, charts
@@ -111,7 +110,7 @@ def find_adapters(d):
def find_pds(d):
- keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
d = ' '.join(v.strip() for v in d if v.startswith(keys))
return [PD(*v) for v in RE_VD.findall(d)]
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 169a5f7bf..abd93fd01 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -1,6 +1,13 @@
-# memcached
+<!--
+title: "Memcached monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md
+sidebar_label: "Memcached"
+-->
+
+# Memcached monitoring with Netdata
+
+Collects memory-caching system performance metrics. It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
-Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
1. **Network** in kilobytes/s
@@ -66,7 +73,15 @@ Memcached monitoring module. Data grabbed from [stats interface](https://github.
- rate
-## configuration
+## Configuration
+
+Edit the `python.d/memcached.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/memcached.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 9803dbb09..bb656a2d6 100644
--- a/collectors/python.d.plugin/memcached/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'cache',
'net',
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
index fd694c1e5..c0df123d7 100644
--- a/collectors/python.d.plugin/mongodb/README.md
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -1,8 +1,14 @@
-# mongodb
+<!--
+title: "MongoDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mongodb/README.md
+sidebar_label: "MongoDB"
+-->
-Module monitor mongodb performance and health metrics
+# MongoDB monitoring with Netdata
-**Requirements:**
+Monitors performance and health metrics of MongoDB.
+
+## Requirements
- `python-pymongo` package v2.4+.
@@ -74,7 +80,7 @@ Number of charts depends on mongodb version, storage engine and other features (
13. **Cache metrics** (WiredTiger):
- percentage of bytes currently in the cache (amount of space taken by cached data)
- - percantage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+ - percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
14. **Pages evicted from cache** (WiredTiger):
@@ -175,7 +181,15 @@ db.createUser({
})
```
-### configuration
+## Configuration
+
+Edit the `python.d/mongodb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mongodb.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 0dbe82ff9..2e6fb220a 100644
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -12,13 +12,13 @@ from sys import exc_info
try:
from pymongo import MongoClient, ASCENDING, DESCENDING
from pymongo.errors import PyMongoError
+
PYMONGO = True
except ImportError:
PYMONGO = False
from bases.FrameworkServices.SimpleService import SimpleService
-
REPL_SET_STATES = [
('1', 'primary'),
('8', 'down'),
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index a54b9d67f..fe1389687 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -1,4 +1,10 @@
-# monit
+<!--
+title: "Monit monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md
+sidebar_label: "Monit"
+-->
+
+# Monit monitoring with Netdata
Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
@@ -19,7 +25,15 @@ Monit monitoring module. Data is grabbed from stats XML interface (exists for a
- Hosts (+latency)
- Network interfaces
-## configuration
+## Configuration
+
+Edit the `python.d/monit.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/monit.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
index 9f3270572..bfc182349 100644
--- a/collectors/python.d.plugin/monit/monit.chart.py
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -4,12 +4,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import xml.etree.ElementTree as ET
-
from collections import namedtuple
from bases.FrameworkServices.UrlService import UrlService
-
MonitType = namedtuple('MonitType', ('index', 'name'))
# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
@@ -122,7 +120,7 @@ CHARTS = {
class BaseMonitService(object):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
self.type = typ
self.name = name
self.status = status
@@ -153,12 +151,21 @@ class BaseMonitService(object):
class ProcessMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
super(ProcessMonitService, self).__init__(typ, name, status, monitor)
self.uptime = None
self.threads = None
self.children = None
+ def __eq__(self, other):
+ return super(ProcessMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(ProcessMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(ProcessMonitService, self).__hash__()
+
def uptime_key(self):
return 'process_uptime_{0}'.format(self.name)
@@ -183,16 +190,25 @@ class ProcessMonitService(BaseMonitService):
class HostMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
+ def __init__(self, typ, name, status, monitor):
super(HostMonitService, self).__init__(typ, name, status, monitor)
self.latency = None
+ def __eq__(self, other):
+ return super(HostMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(HostMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(HostMonitService, self).__hash__()
+
def latency_key(self):
return 'host_latency_{0}'.format(self.name)
def data(self):
base_data = super(HostMonitService, self).data()
- latency = float(self.latency) * 1000000 if self.latency else None
+ latency = float(self.latency) * 1000000 if self.latency else None
data = {self.latency_key(): latency}
data.update(base_data)
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
index 7dca8f406..d8d3c1d0b 100644
--- a/collectors/python.d.plugin/mysql/README.md
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -1,8 +1,14 @@
-# mysql
+<!--
+title: "MySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mysql/README.md
+sidebar_label: "MySQL"
+-->
-Module monitors one or more mysql servers
+# MySQL monitoring with Netdata
-**Requirements:**
+Monitors one or more MySQL servers.
+
+## Requirements
- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
- `netdata` local user to connect to the MySQL server.
@@ -39,7 +45,7 @@ This module will produce following charts (if data is available):
- cache hits
- replace
-4. **Handlerse** in handlers/s
+4. **Handlers** in handlers/s
- commit
- delete
@@ -61,7 +67,7 @@ This module will produce following charts (if data is available):
- immediate
- waited
-6. **Table Select Join Issuess** in joins/s
+6. **Table Select Join Issues** in joins/s
- full join
- full range join
@@ -69,7 +75,7 @@ This module will produce following charts (if data is available):
- range check
- scan
-7. **Table Sort Issuess** in joins/s
+7. **Table Sort Issues** in joins/s
- merge passes
- range
@@ -158,7 +164,7 @@ This module will produce following charts (if data is available):
- updated
- deleted
-24. **InnoDB Buffer Pool Pagess** in pages
+24. **InnoDB Buffer Pool Pages** in pages
- data
- dirty
@@ -328,7 +334,15 @@ This module will produce following charts (if data is available):
- update
- other
-## configuration
+## Configuration
+
+Edit the `python.d/mysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/mysql.conf
+```
You can provide, per server, the following:
@@ -371,7 +385,7 @@ remote:
```
If no configuration is given, the module will attempt to connect to MySQL server via a unix socket at
-`/var/run/mysqld/mysqld.sock` without password and with username `root`.
+`/var/run/mysqld/mysqld.sock` without a password, using the username `root` or `netdata` (the `netdata` user was granted permissions in the Requirements section of this document).
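+
+A minimal job making that default explicit might look like this (a sketch; parameter names as documented in `python.d/mysql.conf`):
+
+```yaml
+local:
+  user: 'netdata'
+  socket: '/var/run/mysqld/mysqld.sock'
+```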
`userstats` graph works only if you enable the plugin in MariaDB server and set proper MySQL privileges (SUPER or
PROCESS). For more details, please check the [MariaDB User Statistics
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index f37315479..1737e16b4 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -6,7 +6,6 @@
from bases.FrameworkServices.MySQLService import MySQLService
-
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
QUERY_SLAVE = 'SHOW SLAVE STATUS;'
@@ -348,7 +347,7 @@ CHARTS = {
]
},
'threads_creation_rate': {
- 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads', 'line'],
+ 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
'lines': [
['Threads_created', 'created', 'incremental'],
]
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
index ebbdb0f25..b55b01e7c 100644
--- a/collectors/python.d.plugin/nginx/README.md
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -1,8 +1,14 @@
-# nginx
+<!--
+title: "NGINX monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx/README.md
+sidebar_label: "NGINX"
+-->
-This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+# NGINX monitoring with Netdata
-**Requirements:**
+Monitors one or more NGINX servers depending on configuration. Servers can be either local or remote.
+
+## Requirements
- nginx with configured 'ngx_http_stub_status_module'
- 'location /stub_status'
@@ -30,9 +36,17 @@ It produces following charts:
- accepts
- handled
-## configuration
+## Configuration
+
+Edit the `python.d/nginx.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx.conf
+```
-Needs only `url` to server's `stub_status`
+Needs only the `url` to the server's `stub_status`.
Here is an example for local server:
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 84a5985e4..7548d6a42 100644
--- a/collectors/python.d.plugin/nginx/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'connections',
'requests',
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
index 1110c5524..2580740c3 100644
--- a/collectors/python.d.plugin/nginx_plus/README.md
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -1,7 +1,12 @@
-# nginx_plus
+<!--
+title: "NGINX Plus monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx_plus/README.md
+sidebar_label: "NGINX Plus"
+-->
-This module will monitor one or more nginx_plus servers depending on configuration.
-Servers can be either local or remote.
+# NGINX Plus monitoring with Netdata
+
+Monitors one or more NGINX Plus servers depending on configuration. Servers can be either local or remote.
Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
@@ -134,11 +139,19 @@ For every cache:
- usage
-## configuration
+## Configuration
+
+Edit the `python.d/nginx_plus.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx_plus.conf
+```
-Needs only `url` to server's `status`
+Needs only the `url` to the server's `status`.
-Here is an example for local server:
+Here is an example for a local server:
```yaml
local:
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 6cf35cb13..a6c035f68 100644
--- a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -16,7 +16,6 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'requests_total',
'requests_current',
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index 61bc5c458..1e7b240e7 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -1,8 +1,14 @@
-# nsd
+<!--
+title: "NSD monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md
+sidebar_label: "NSD"
+-->
-Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+# NSD monitoring with Netdata
-**Requirements:**
+Uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+## Requirements
- Version of `nsd` must be 4.0+
- Netdata must have permissions to run `nsd-control stats_noreset`
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index 77b0d7bbf..6f9b2cec8 100644
--- a/collectors/python.d.plugin/nsd/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -7,7 +7,6 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService
-
update_every = 30
NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index d4d0dc60d..0b08f12b8 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -1,8 +1,14 @@
-# ntpd
+<!--
+title: "NTP daemon monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ntpd/README.md
+sidebar_label: "NTP daemon"
+-->
-Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+# NTP daemon monitoring with Netdata
-**Requirements:**
+Monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+## Requirements
- Version: `NTPv4`
- Local interrogation allowed in `/etc/ntp.conf` (default):
@@ -41,7 +47,15 @@ It produces:
- ppoll
- precision
-## configuration
+## Configuration
+
+Edit the `python.d/ntpd.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ntpd.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index e33332cb3..275d2276c 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -4,12 +4,11 @@
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
-import struct
import re
+import struct
from bases.FrameworkServices.SocketService import SocketService
-
# NTP Control Message Protocol constants
MODE = 6
HEADER_FORMAT = '!BBHHHHH'
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index 71e3e2889..9bfb2094b 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -1,42 +1,58 @@
-# nvidia_smi
+<!--
+title: "Nvidia GPU monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md
+sidebar_label: "Nvidia GPUs"
+-->
-This module monitors the `nvidia-smi` cli tool.
+# Nvidia GPU monitoring with Netdata
-**Requirements and Notes:**
+Monitors performance metrics (memory usage, fan speed, PCIe bandwidth utilization, temperature, etc.) using the `nvidia-smi` CLI tool.
-- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool. Mostly the newer high end models used for AI / ML and Crypto or Pro range, read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
-- You must enable this plugin as its disabled by default due to minor performance issues.
+## Requirements and Notes
+- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool, which mostly means newer high-end models used for AI/ML, crypto, or the Pro range; read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+- You must enable this plugin, as it's disabled by default due to minor performance issues.
- On some systems when the GPU is idle the `nvidia-smi` tool unloads and there is added latency again when it is next queried. If you are running GPUs under constant workload this isn't likely to be an issue.
-
- Currently the `nvidia-smi` tool is being queried via cli. Updating the plugin to use the nvidia c/c++ API directly should resolve this issue. See discussion here: <https://github.com/netdata/netdata/pull/4357>
-
- Contributions are welcome.
-
- Make sure `netdata` user can execute `/usr/bin/nvidia-smi` or wherever your binary is.
-
+- If the `nvidia-smi` process [is not killed after netdata restart](https://github.com/netdata/netdata/issues/7143), you need to turn off `loop_mode`.
- `poll_seconds` is how often in seconds the tool is polled for as an integer.
-It produces:
+## Charts
+
+It produces the following charts:
-1. Per GPU
+- PCI Express Bandwidth Utilization in `KiB/s`
+- Fan Speed in `percentage`
+- GPU Utilization in `percentage`
+- Memory Bandwidth Utilization in `percentage`
+- Encoder/Decoder Utilization in `percentage`
+- Memory Usage in `MiB`
+- Temperature in `celsius`
+- Clock Frequencies in `MHz`
+- Power Utilization in `Watts`
+- Memory Used by Each Process in `MiB`
+- Memory Used by Each User in `MiB`
+- Number of Users on GPU in `num`
- - GPU utilization
- - memory allocation
- - memory utilization
- - fan speed
- - power usage
- - temperature
- - clock speed
- - PCI bandwidth
+## Configuration
-## configuration
+Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nvidia_smi.conf
+```
Sample:
```yaml
-poll_seconds: 1
+loop_mode : yes
+poll_seconds : 1
+exclude_zero_memory_users : yes
```
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index 0bea268ef..9c69586dd 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -2,21 +2,22 @@
# Description: nvidia-smi netdata python.d module
# Original Author: Steven Noonan (tycho)
# Author: Ilya Mashchenko (ilyam8)
+# User Memory Stat Author: Guido Scatena (scatenag)
import subprocess
import threading
+import os
+import pwd
+
import xml.etree.ElementTree as et
-from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
disabled_by_default = True
-
NVIDIA_SMI = 'nvidia-smi'
-BAD_VALUE = 'N/A'
-
EMPTY_ROW = ''
EMPTY_ROW_LIMIT = 500
POLLER_BREAK_ROW = '</nvidia_smi_log>'
@@ -31,6 +32,8 @@ TEMPERATURE = 'temperature'
CLOCKS = 'clocks'
POWER = 'power'
PROCESSES_MEM = 'processes_mem'
+USER_MEM = 'user_mem'
+USER_NUM = 'user_num'
ORDER = [
PCI_BANDWIDTH,
@@ -43,6 +46,8 @@ ORDER = [
CLOCKS,
POWER,
PROCESSES_MEM,
+ USER_MEM,
+ USER_NUM,
]
@@ -76,7 +81,8 @@ def gpu_charts(gpu):
]
},
ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
+ 'line'],
'lines': [
['encoder_util', 'encoder'],
['decoder_util', 'decoder'],
@@ -114,6 +120,16 @@ def gpu_charts(gpu):
'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
'lines': []
},
+ USER_MEM: {
+ 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_NUM: {
+ 'options': [None, 'Number of Users on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
+ 'lines': [
+ ['user_num', 'users'],
+ ]
+ },
}
idx = gpu.num
@@ -212,6 +228,7 @@ def handle_attr_error(method):
return method(*args, **kwargs)
except AttributeError:
return None
+
return on_call
@@ -221,13 +238,66 @@ def handle_value_error(method):
return method(*args, **kwargs)
except ValueError:
return None
+
return on_call
+HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
+ETC_PASSWD_PATH = '/etc/passwd'
+PROC_PATH = '/proc'
+
+IS_INSIDE_DOCKER = False
+
+if HOST_PREFIX:
+ ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
+ PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
+ IS_INSIDE_DOCKER = True
+
+
+def read_passwd_file():
+ data = dict()
+ with open(ETC_PASSWD_PATH, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#"):
+ continue
+ fields = line.split(":")
+ # name, passwd, uid, gid, comment, home_dir, shell
+ if len(fields) != 7:
+ continue
+ # uid, gid
+ fields[2], fields[3] = int(fields[2]), int(fields[3])
+ data[fields[2]] = fields
+ return data
+
+
+def read_passwd_file_safe():
+ try:
+ if IS_INSIDE_DOCKER:
+ return read_passwd_file()
+ return dict((k[2], k) for k in pwd.getpwall())
+ except (OSError, IOError):
+ return dict()
+
+
+def get_username_by_pid_safe(pid, passwd_file):
+ path = os.path.join(PROC_PATH, pid)
+ try:
+ uid = os.stat(path).st_uid
+ except (OSError, IOError):
+ return ''
+
+ try:
+ return passwd_file[uid][0]
+ except KeyError:
+ return str(uid)
+
+
class GPU:
- def __init__(self, num, root):
+ def __init__(self, num, root, exclude_zero_memory_users=False):
self.num = num
self.root = root
+ self.exclude_zero_memory_users = exclude_zero_memory_users
def id(self):
return self.root.get('id')
@@ -301,15 +371,22 @@ class GPU:
@handle_attr_error
def processes(self):
- p_nodes = self.root.find('processes').findall('process_info')
- ps = []
- for p in p_nodes:
- ps.append({
- 'pid': p.find('pid').text,
- 'process_name': p.find('process_name').text,
- 'used_memory': int(p.find('used_memory').text.split()[0]),
+ processes_info = self.root.find('processes').findall('process_info')
+ if not processes_info:
+ return list()
+
+ passwd_file = read_passwd_file_safe()
+ processes = list()
+
+ for info in processes_info:
+ pid = info.find('pid').text
+ processes.append({
+ 'pid': int(pid),
+ 'process_name': info.find('process_name').text,
+ 'used_memory': int(info.find('used_memory').text.split()[0]),
+ 'username': get_username_by_pid_safe(pid, passwd_file),
})
- return ps
+ return processes
def data(self):
data = {
@@ -330,11 +407,21 @@ class GPU:
'power_draw': self.power_draw(),
}
processes = self.processes() or []
- data.update({'process_mem_{0}'.format(p['pid']): p['used_memory'] for p in processes})
-
- return dict(
- ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE
- )
+ users = set()
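+ # fold per-process memory into per-user totals and count distinct users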
+ for p in processes:
+ data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
+ if p['username']:
+ if self.exclude_zero_memory_users and p['used_memory'] == 0:
+ continue
+ users.add(p['username'])
+ key = 'user_mem_{0}'.format(p['username'])
+ if key in data:
+ data[key] += p['used_memory']
+ else:
+ data[key] = p['used_memory']
+ data['user_num'] = len(users)
+
+ return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
class Service(SimpleService):
@@ -342,10 +429,12 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = list()
self.definitions = dict()
+ self.loop_mode = configuration.get('loop_mode', True)
poll = int(configuration.get('poll_seconds', 1))
+ self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
self.poller = NvidiaSMIPoller(poll)
- def get_data(self):
+ def get_data_loop_mode(self):
if not self.poller.is_started():
self.poller.start()
@@ -353,7 +442,17 @@ class Service(SimpleService):
self.debug('poller is off')
return None
- last_data = self.poller.data()
+ return self.poller.data()
+
+ def get_data_normal_mode(self):
+ return self.poller.run_once()
+
+ def get_data(self):
+ if self.loop_mode:
+ last_data = self.get_data_loop_mode()
+ else:
+ last_data = self.get_data_normal_mode()
+
if not last_data:
return None
@@ -363,9 +462,13 @@ class Service(SimpleService):
data = dict()
for idx, root in enumerate(parsed.findall('gpu')):
- gpu = GPU(idx, root)
- data.update(gpu.data())
+ gpu = GPU(idx, root, self.exclude_zero_memory_users)
+ gpu_data = gpu.data()
+ # self.debug(gpu_data)
+ gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
+ data.update(gpu_data)
self.update_processes_mem_chart(gpu)
+ self.update_processes_user_mem_chart(gpu)
return data or None
@@ -384,6 +487,24 @@ class Service(SimpleService):
if dim.id not in active_dim_ids:
chart.del_dimension(dim.id, hide=False)
+ def update_processes_user_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ if not p.get('username'):
+ continue
+ dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0}'.format(p['username'])])
+
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
def check(self):
if not self.poller.has_smi():
self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
@@ -419,3 +540,11 @@ class Service(SimpleService):
order, charts = gpu_charts(GPU(idx, root))
self.order.extend(order)
self.definitions.update(charts)
+
+
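+# nvidia-smi reports unsupported metrics as strings such as 'N/A';
+# keep only values that can be parsed as integers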
+def is_gpu_data_value_valid(value):
+ try:
+ int(value)
+ except (TypeError, ValueError):
+ return False
+ return True
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
index 53e544a5d..3d2a30d41 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -61,6 +61,8 @@
#
# Additionally to the above, example also supports the following:
#
-# poll_seconds: SECONDS # default is 1. Sets the frequency of seconds the nvidia-smi tool is polled.
+# loop_mode: yes/no # default is yes. If set to yes `nvidia-smi` is executed in a separate thread using the `-l` option.
+# poll_seconds: SECONDS # default is 1. Sets how often, in seconds, the nvidia-smi tool is polled in loop mode.
+# exclude_zero_memory_users: yes/no # default is no. Whether to collect metrics for users with 0 MiB memory allocation.
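+#
+# For example (sample values only):
+#
+# loop_mode: yes
+# poll_seconds: 1
+# exclude_zero_memory_users: yes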
#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index f1f9de581..4942d0f39 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -1,6 +1,12 @@
-# openldap
+<!--
+title: "OpenLDAP monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md
+sidebar_label: "OpenLDAP"
+-->
-This module provides statistics information from openldap (slapd) server.
+# OpenLDAP monitoring with Netdata
+
+Provides statistics from the OpenLDAP (`slapd`) server.
Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(5) is available.
**Requirement:**
@@ -47,7 +53,15 @@ Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(
- read
- write
-### configuration
+## Configuration
+
+Edit the `python.d/openldap.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/openldap.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
index 3266ce400..aba143954 100644
--- a/collectors/python.d.plugin/openldap/openldap.chart.py
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -5,18 +5,19 @@
try:
import ldap
+
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
from bases.FrameworkServices.SimpleService import SimpleService
-
DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '389'
DEFAULT_TLS = False
DEFAULT_CERT_CHECK = True
DEFAULT_TIMEOUT = 1
+DEFAULT_START_TLS = False
ORDER = [
'total_connections',
@@ -49,7 +50,7 @@ CHARTS = {
]
},
'referrals_sent': {
- 'options': [None, 'Referrals', 'referals/s', 'ldap', 'openldap.referrals', 'line'],
+ 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
'lines': [
['referrals_sent', 'sent', 'incremental']
]
@@ -110,7 +111,7 @@ SEARCH_LIST = {
'add_operations': (
'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
),
- 'delete_operations': (
+ 'delete_operations': (
'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
),
'modify_operations': (
@@ -143,6 +144,7 @@ class Service(SimpleService):
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
+ self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
self.alive = False
self.conn = None
@@ -159,8 +161,13 @@ class Service(SimpleService):
else:
self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
- if self.use_tls and not self.cert_check:
+ if (self.use_tls or self.use_start_tls) and not self.cert_check:
self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ if self.use_start_tls or self.use_tls:
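+ # request a new TLS context so the TLS options set above take effect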
+ self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
+ if self.use_start_tls:
+ self.conn.protocol_version = ldap.VERSION3
+ self.conn.start_tls_s()
if self.username and self.password:
self.conn.simple_bind(self.username, self.password)
except ldap.LDAPError as error:
@@ -193,17 +200,17 @@ class Service(SimpleService):
num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
result_type, result_data = self.conn.result(num, 1)
except ldap.LDAPError as error:
- self.error("Empty result. Check bind username/password. Message: ",error)
+ self.error("Empty result. Check bind username/password. Message: ", error)
self.alive = False
return None
+ if result_type != 101:
+ continue
+
try:
- if result_type == 101:
- val = int(result_data[0][1].values()[0][0])
+ data[key] = int(list(result_data[0][1].values())[0][0])
except (ValueError, IndexError) as error:
self.debug(error)
continue
- data[key] = val
-
return data
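The parsing change above is a Python 3 compatibility fix: under Python 3, `dict.values()` returns a view object that cannot be indexed, so the old `result_data[0][1].values()[0][0]` raises `TypeError`. A tiny illustration, with an assumed attribute-dict shape:

```python
# Shape of a python-ldap result attribute dict (values are illustrative).
entry = {'monitorCounter': [b'42']}

# Python 2: entry.values()[0][0] worked, because values() returned a list.
# Python 3: dict_values is not subscriptable, so materialize it first.
value = int(list(entry.values())[0][0])
print(value)  # 42
```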
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
index 73e8636ed..5fd99a525 100644
--- a/collectors/python.d.plugin/openldap/openldap.conf
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -65,10 +65,11 @@ update_every: 10
# Set here your LDAP connection settings
-#username : "cn=admin,dc=example,dc=com" # The bind user with right to access monitor statistics
-#password : "yourpass" # The password for the binded user
-#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for.
-#port : 389 # The listening port of the LDAP server. Change to 636 port in case of TLS connection
-#use_tls : False # Make True if a TLS connection is used
-#cert_check : True # False if you want to ignore certificate check
-#timeout : 1 # Seconds to timeout if no connection exi
+#username : "cn=admin,dc=example,dc=com" # The bind user with the right to access the monitor statistics
+#password : "yourpass" # The password for the bound user
+#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published.
+#port : 389 # The listening port of the LDAP server. Change to port 636 in case of a TLS connection.
+#use_tls : False # Set to True if a TLS connection is used over ldaps://
+#use_start_tls: False # Set to True if a TLS connection is used over ldap://
+#cert_check : True # Set to False if you want to skip the certificate check
+#timeout : 1 # Seconds to wait before timing out when no connection exists
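The two TLS flags are not interchangeable: `use_tls` selects an `ldaps://` connection that is encrypted from the first byte, while the new `use_start_tls` upgrades a plain `ldap://` connection via STARTTLS. A condensed sketch of the connect logic this patch adds to `openldap.chart.py` (the `ldaps://` initialization presumably sits just above the hunk shown), assuming the `python-ldap` package:

```python
import ldap  # python-ldap, already required by this collector

def connect(server, port, use_tls, use_start_tls, cert_check, timeout):
    scheme = 'ldaps' if use_tls else 'ldap'
    conn = ldap.initialize('{0}://{1}:{2}'.format(scheme, server, port))
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
    if (use_tls or use_start_tls) and not cert_check:
        # cert_check: False disables certificate verification entirely.
        conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
    if use_tls or use_start_tls:
        conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)  # apply TLS options to a new context
    if use_start_tls:
        conn.protocol_version = ldap.VERSION3  # STARTTLS requires LDAPv3
        conn.start_tls_s()
    return conn
```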
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
index 708f261d9..d61c7d2ad 100644
--- a/collectors/python.d.plugin/oracledb/README.md
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -1,8 +1,14 @@
-# oracledb
+<!--
+title: "OracleDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md
+sidebar_label: "OracleDB"
+-->
-Module monitor oracledb performance and health metrics.
+# OracleDB monitoring with Netdata
-**Requirements:**
+Monitors the performance and health metrics of the Oracle database.
+
+## Requirements
- `cx_Oracle` package.
- Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
@@ -35,14 +41,19 @@ It produces following charts:
- Size
- Usage
- Usage In Percent
+- Allocated space
+ - Size
+ - Usage
+ - Usage In Percent
## Prerequisites
To use the Oracle module, do the following:
-1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-cx-oracle)).
+1. Install the `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html)).
-2. Install Oracle Client libraries ([link](https://cx-oracle.readthedocs.io/en/latest/installation.html#install-oracle-client)).
+2. Install Oracle Client libraries
+ ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html#install-oracle-client)).
3. Create a read-only `netdata` user with proper access to your Oracle Database Server.
@@ -57,7 +68,15 @@ GRANT CONNECT TO netdata;
GRANT SELECT_CATALOG_ROLE TO netdata;
```
-### configuration
+## Configuration
+
+Edit the `python.d/oracledb.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/oracledb.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
index 9490b6218..28ef8db10 100644
--- a/collectors/python.d.plugin/oracledb/oracledb.chart.py
+++ b/collectors/python.d.plugin/oracledb/oracledb.chart.py
@@ -9,11 +9,11 @@ from bases.FrameworkServices.SimpleService import SimpleService
try:
import cx_Oracle
+
HAS_ORACLE = True
except ImportError:
HAS_ORACLE = False
-
ORDER = [
'session_count',
'session_limit_usage',
@@ -34,6 +34,9 @@ ORDER = [
'tablespace_size',
'tablespace_usage',
'tablespace_usage_in_percent',
+ 'allocated_size',
+ 'allocated_usage',
+ 'allocated_usage_in_percent',
]
CHARTS = {
@@ -170,9 +173,20 @@ CHARTS = {
'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
'lines': [],
},
+ 'allocated_size': {
+ 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage': {
+ 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
+ 'lines': [],
+ },
}
-
CX_CONNECT_STRING = "{0}/{1}@//{2}/{3}"
QUERY_SYSTEM = '''
@@ -194,6 +208,27 @@ FROM
dba_tablespace_usage_metrics m
JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
'''
+QUERY_ALLOCATED = '''
+SELECT
+ nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
+ bytes_alloc used_bytes,
+ bytes_alloc-nvl(bytes_free,0) max_bytes,
+ ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
+FROM
+ (SELECT
+ sum(bytes) bytes_free,
+ tablespace_name
+ FROM sys.dba_free_space
+ GROUP BY tablespace_name
+ ) a,
+ (SELECT
+ sum(bytes) bytes_alloc,
+ tablespace_name
+ FROM sys.dba_data_files
+ GROUP BY tablespace_name
+ ) b
+WHERE a.tablespace_name (+) = b.tablespace_name
+'''
QUERY_ACTIVITIES_COUNT = '''
SELECT
name,
@@ -398,6 +433,26 @@ class Service(SimpleService):
data['{0}_tablespace_used'.format(name)] = int(used * 1000)
data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+ # ALLOCATED SPACE
+ try:
+ rv = self.gather_allocated_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+ if not (not offline and self.charts):
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_allocated_size'.format(name)] = int(size * 1000)
+ data['{0}_allocated_used'.format(name)] = int(used * 1000)
+ data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
return data or None
def gather_system_metrics(self):
@@ -613,6 +668,44 @@ class Service(SimpleService):
)
return metrics
+ def gather_allocated_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ALLOCATED)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
def gather_wait_time_metrics(self):
"""
:return:
@@ -712,3 +805,27 @@ class Service(SimpleService):
1,
1000,
])
+ self.charts['allocated_size'].add_dimension(
+ [
+ '{0}_allocated_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage'].add_dimension(
+ [
+ '{0}_allocated_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage_in_percent'].add_dimension(
+ [
+ '{0}_allocated_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
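The allocated-space path mirrors the existing tablespace path: each row of `QUERY_ALLOCATED` is normalized for NULLs, and the values are scaled by 1000 because every new dimension is defined with a divisor of 1000 (three decimal places on the chart). A condensed sketch of that per-row handling:

```python
def normalize_allocated_row(tablespace_name, used_bytes, max_bytes, used_percent):
    # NULLs (e.g. from the outer join) are coerced: a missing used_bytes
    # marks the tablespace offline, and missing sizes fall back to zero.
    offline = used_bytes is None
    used = 0.0 if used_bytes is None else float(used_bytes)
    size = 0.0 if max_bytes is None else float(max_bytes)
    percent = 0.0 if used_percent is None else float(used_percent)
    return tablespace_name, offline, size, used, percent

# One 'USERS' row like the docstring example, turned into data-point keys.
name, offline, size, used, percent = normalize_allocated_row('USERS', 1048576.0, 3233169408.0, 0.0324)
data = {
    '{0}_allocated_size'.format(name): int(size * 1000),
    '{0}_allocated_used'.format(name): int(used * 1000),
    '{0}_allocated_used_in_percent'.format(name): int(percent * 1000),
}
```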
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
index 053e3f0de..8fa8cb833 100644
--- a/collectors/python.d.plugin/ovpn_status_log/README.md
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -1,8 +1,14 @@
-# ovpn_status_log
+<!--
+title: "OpenVPN monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ovpn_status_log/README.md
+sidebar_label: "OpenVPN"
+-->
-Module monitor openvpn-status log file.
+# OpenVPN monitoring with Netdata
-**Requirements:**
+Parses server log files and provides summary (client, traffic) metrics.
+
+## Requirements
- If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
so that multiple instances do not overwrite each other's output files.
@@ -22,7 +28,15 @@ It produces:
- in
- out
-## configuration
+## Configuration
+
+Edit the `python.d/ovpn_status_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/ovpn_status_log.conf
+```
Sample:
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index f094ab7c1..cfc87be36 100644
--- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -7,7 +7,6 @@ import re
from bases.FrameworkServices.SimpleService import SimpleService
-
update_every = 10
ORDER = [
@@ -72,7 +71,7 @@ class Service(SimpleService):
break
if found:
return True
- self.error('Failed to parse ovpenvpn log file')
+ self.error('Failed to parse openvpn log file')
return False
def _get_raw_data(self):
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
index 5f8284330..9d0dbb580 100644
--- a/collectors/python.d.plugin/phpfpm/README.md
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -1,41 +1,47 @@
-# phpfpm
+<!--
+title: "PHP-FPM monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/phpfpm/README.md
+sidebar_label: "PHP-FPM"
+-->
-This module will monitor one or more php-fpm instances depending on configuration.
+# PHP-FPM monitoring with Netdata
-**Requirements:**
+Monitors one or more PHP-FPM instances depending on configuration.
-- php-fpm with enabled `status` page
-- access to `status` page via web server
-
-It produces following charts:
+## Requirements
-1. **Active Connections**
-
- - active
- - maxActive
- - idle
+- `PHP-FPM` with [enabled `status` page](https://easyengine.io/tutorials/php/fpm-status-page/)
+- access to `status` page via web server
-2. **Requests** in requests/s
+## Charts
- - requests
+It produces the following charts:
-3. **Performance**
+- Active Connections in `connections`
+- Requests in `requests/s`
+- Performance in `status`
+- Requests Duration Among All Idle Processes in `milliseconds`
+- Last Request CPU Usage Among All Idle Processes in `percentage`
+- Last Request Memory Usage Among All Idle Processes in `KB`
- - reached
- - slow
+## Configuration
-## configuration
+Edit the `python.d/phpfpm.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-Needs only `url` to server's `status`
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/phpfpm.conf
+```
-Here is an example for local instance:
+It needs only the `url` of the server's `status` page. Here is an example for local and remote instances:
```yaml
-update_every : 3
-priority : 90100
-
local:
- url : 'http://localhost/status'
+ url : 'http://localhost/status?full&json'
+
+remote:
+ url : 'http://203.0.113.10/status?full&json'
```
Without configuration, the module attempts to connect to `http://localhost/status`
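The `?full&json` query string matters: without `full`, the status page reports only pool-level counters, and the per-process data behind the three new idle-process charts is missing. A hedged sketch of what the collector derives from that output (the JSON field names follow PHP-FPM's status page format and should be treated as illustrative):

```python
import json
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

status = json.loads(urlopen('http://localhost/status?full&json').read().decode())

# Only idle processes carry data about their last finished request;
# active ones are still serving and would skew the numbers.
idle = [p for p in status.get('processes', []) if p.get('state') == 'Idle']
if idle:
    durations = [p['request duration'] for p in idle]  # reported in microseconds
    print('request duration min/max (ms):', min(durations) / 1000.0, max(durations) / 1000.0)
```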
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index d0e9960e0..226df99c6 100644
--- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -9,7 +9,6 @@ import re
from bases.FrameworkServices.UrlService import UrlService
-
REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
POOL_INFO = [
@@ -71,7 +70,8 @@ CHARTS = {
]
},
'request_duration': {
- 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+ 'options': [None, 'PHP-FPM Requests Duration Among All Idle Processes', 'milliseconds', 'request duration',
+ 'phpfpm.request_duration',
'line'],
'lines': [
['minReqDur', 'min', 'absolute', 1, 1000],
@@ -80,7 +80,8 @@ CHARTS = {
]
},
'request_cpu': {
- 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'options': [None, 'PHP-FPM Last Request CPU Usage Among All Idle Processes', 'percentage', 'request CPU',
+ 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
@@ -88,7 +89,8 @@ CHARTS = {
]
},
'request_mem': {
- 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'options': [None, 'PHP-FPM Last Request Memory Usage Among All Idle Processes', 'KB', 'request memory',
+ 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
index 2bbea10c7..35521b2ad 100644
--- a/collectors/python.d.plugin/portcheck/README.md
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -1,6 +1,12 @@
-# portcheck
+<!--
+title: "TCP endpoint monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/portcheck/README.md
+sidebar_label: "TCP endpoints"
+-->
-Module monitors a remote TCP service.
+# TCP endpoint monitoring with Netdata
+
+Monitors TCP endpoint availability and response time.
The following charts are drawn per host:
@@ -16,7 +22,15 @@ Following charts are drawn per host:
- Connection refused: port not listening or blocked
- Connection timed out: host or port unreachable
-## configuration
+## Configuration
+
+Edit the `python.d/portcheck.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/portcheck.conf
+```
```yaml
server:
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index 8479e38e4..818ac765d 100644
--- a/collectors/python.d.plugin/portcheck/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -12,7 +12,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService
-
PORT_LATENCY = 'connect'
PORT_SUCCESS = 'success'
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index 5d2822c1d..53073ea8d 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -1,6 +1,14 @@
-# postfix
+<!--
+title: "Postfix monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md
+sidebar_label: "Postfix"
+-->
-Simple module executing `postfix -p` to grab postfix queue.
+# Postfix monitoring with Netdata
+
+Monitors MTA email queue statistics, using the `postqueue` tool.
+
+It executes `postqueue -p` to read the Postfix queue.
It produces only two charts:
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
index 29dd85a5c..dc9b18467 100644
--- a/collectors/python.d.plugin/postgres/README.md
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -1,10 +1,16 @@
-# postgres
+<!--
+title: "PostgreSQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postgres/README.md
+sidebar_label: "PostgreSQL"
+-->
-Module monitors one or more postgres servers.
+# PostgreSQL monitoring with Netdata
-**Requirements:**
+Collects database health and performance metrics.
-- `python-psycopg2` package. You have to install it manually.
+## Requirements
+
+- `python-psycopg2` package. You have to install it manually and make sure that it is available to the `netdata` user, either using `pip`, the package manager of your Linux distribution, or any other method you prefer.
The following charts are drawn:
@@ -16,50 +22,63 @@ Following charts are drawn:
- active
-3. **Write-Ahead Logging Statistics** files/s
+3. **Current Backend Process Usage** percentage
+
+ - used
+ - available
+
+4. **Write-Ahead Logging Statistics** files/s
- total
- ready
- done
-4. **Checkpoints** writes/s
+5. **Checkpoints** writes/s
- scheduled
- requested
-5. **Current connections to db** count
+6. **Current connections to db** count
- connections
-6. **Tuples returned from db** tuples/s
+7. **Tuples returned from db** tuples/s
- sequential
- bitmap
-7. **Tuple reads from db** reads/s
+8. **Tuple reads from db** reads/s
- disk
- cache
-8. **Transactions on db** transactions/s
+9. **Transactions on db** transactions/s
- committed
- rolled back
-9. **Tuples written to db** writes/s
+10. **Tuples written to db** writes/s
- inserted
- updated
- deleted
- conflicts
-10. **Locks on db** count per type
+11. **Locks on db** count per type
- locks
-## configuration
+## Configuration
-For all available options please see module [configuration file](postgres.conf).
+Edit the `python.d/postgres.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/postgres.conf
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:5432`.
```yaml
socket:
@@ -75,8 +94,6 @@ tcp:
port : 5432
```
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
-
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 9e3020358..bd28dd9b7 100644
--- a/collectors/python.d.plugin/postgres/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -39,6 +39,7 @@ CONN_PARAM_SSL_KEY = 'sslkey'
QUERY_NAME_WAL = 'WAL'
QUERY_NAME_ARCHIVE = 'ARCHIVE'
QUERY_NAME_BACKENDS = 'BACKENDS'
+QUERY_NAME_BACKEND_USAGE = 'BACKEND_USAGE'
QUERY_NAME_TABLE_STATS = 'TABLE_STATS'
QUERY_NAME_INDEX_STATS = 'INDEX_STATS'
QUERY_NAME_DATABASE = 'DATABASE'
@@ -76,6 +77,10 @@ METRICS = {
'backends_active',
'backends_idle'
],
+ QUERY_NAME_BACKEND_USAGE: [
+ 'available',
+ 'used'
+ ],
QUERY_NAME_INDEX_STATS: [
'index_count',
'index_size'
@@ -139,6 +144,10 @@ METRICS = {
NO_VERSION = 0
DEFAULT = 'DEFAULT'
+V72 = 'V72'
+V82 = 'V82'
+V91 = 'V91'
+V92 = 'V92'
V96 = 'V96'
V10 = 'V10'
V11 = 'V11'
@@ -235,6 +244,76 @@ FROM pg_stat_activity;
""",
}
+QUERY_BACKEND_USAGE = {
+ DEFAULT: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE backend_type IN ('client backend', 'background worker');
+""",
+ V10: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE backend_type IN ('client backend', 'background worker')
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V92: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V91: """
+SELECT
+ SUM(s.conn) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - SUM(s.conn) AS available
+FROM (
+ SELECT 's' as type, COUNT(1) as conn
+ FROM pg_catalog.pg_stat_activity
+ WHERE current_query NOT LIKE 'autovacuum: %%'
+ UNION ALL
+ SELECT 'r', COUNT(1)
+ FROM pg_catalog.pg_stat_replication
+) as s;
+""",
+ V82: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity
+WHERE current_query NOT LIKE 'autovacuum: %%';
+""",
+ V72: """
+SELECT
+ COUNT(1) as used,
+ current_setting('max_connections')::int - current_setting('superuser_reserved_connections')::int
+ - COUNT(1) AS available
+FROM pg_catalog.pg_stat_activity s
+JOIN pg_catalog.pg_database d ON d.oid = s.datid
+WHERE d.datallowconn;
+""",
+}
+
QUERY_TABLE_STATS = {
DEFAULT: """
SELECT
@@ -315,7 +394,7 @@ FROM pg_stat_database
WHERE
has_database_privilege(
(SELECT current_user), datname, 'connect')
- AND NOT datname ~* '^template\d ';
+ AND NOT datname ~* '^template\d';
""",
}
@@ -528,10 +607,21 @@ SELECT
""",
}
-
def query_factory(name, version=NO_VERSION):
if name == QUERY_NAME_BACKENDS:
return QUERY_BACKEND[DEFAULT]
+ elif name == QUERY_NAME_BACKEND_USAGE:
+ if version < 80200:
+ return QUERY_BACKEND_USAGE[V72]
+ if version < 90100:
+ return QUERY_BACKEND_USAGE[V82]
+ if version < 90200:
+ return QUERY_BACKEND_USAGE[V91]
+ if version < 100000:
+ return QUERY_BACKEND_USAGE[V92]
+ elif version < 120000:
+ return QUERY_BACKEND_USAGE[V10]
+ return QUERY_BACKEND_USAGE[DEFAULT]
elif name == QUERY_NAME_TABLE_STATS:
return QUERY_TABLE_STATS[DEFAULT]
elif name == QUERY_NAME_INDEX_STATS:
@@ -588,6 +678,7 @@ ORDER = [
'db_stat_connections',
'database_size',
'backend_process',
+ 'backend_usage',
'index_count',
'index_size',
'table_count',
@@ -674,6 +765,13 @@ CHARTS = {
['backends_idle', 'idle', 'absolute']
]
},
+ 'backend_usage': {
+ 'options': [None, '% of Connections in use', 'percentage', 'backend processes', 'postgres.backend_usage', 'stacked'],
+ 'lines': [
+ ['available', 'available', 'percentage-of-absolute-row'],
+ ['used', 'used', 'percentage-of-absolute-row']
+ ]
+ },
'index_count': {
'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
'lines': [
@@ -970,6 +1068,7 @@ class Service(SimpleService):
def populate_queries(self):
self.queries[query_factory(QUERY_NAME_DATABASE)] = METRICS[QUERY_NAME_DATABASE]
self.queries[query_factory(QUERY_NAME_BACKENDS)] = METRICS[QUERY_NAME_BACKENDS]
+ self.queries[query_factory(QUERY_NAME_BACKEND_USAGE, self.server_version)] = METRICS[QUERY_NAME_BACKEND_USAGE]
self.queries[query_factory(QUERY_NAME_LOCKS)] = METRICS[QUERY_NAME_LOCKS]
self.queries[query_factory(QUERY_NAME_BGWRITER)] = METRICS[QUERY_NAME_BGWRITER]
self.queries[query_factory(QUERY_NAME_DIFF_LSN, self.server_version)] = METRICS[QUERY_NAME_WAL_WRITES]
@@ -1063,7 +1162,7 @@ def zero_lock_types(databases):
def hide_password(config):
- return dict((k, v if k != 'password' else '*****') for k, v in config.items())
+ return dict((k, v if k != 'password' or not v else '*****') for k, v in config.items())
def add_database_lock_chart(order, definitions, database_name):
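The new `QUERY_BACKEND_USAGE` block is the first query in this module with a full version ladder, so `query_factory` now receives the server version for it. PostgreSQL encodes versions as integers (9.6.5 becomes 90605 via major\*10000 + minor\*100 + patch; from v10 on, 12.1 becomes 120001 via major\*10000 + minor), and each cutoff selects the newest query the server can run. A worked example of the dispatch:

```python
def backend_usage_variant(version):
    # Same thresholds as query_factory's QUERY_NAME_BACKEND_USAGE branch.
    if version < 80200:
        return 'V72'
    if version < 90100:
        return 'V82'
    if version < 90200:
        return 'V91'
    if version < 100000:
        return 'V92'
    if version < 120000:
        return 'V10'
    return 'DEFAULT'

assert backend_usage_variant(90605) == 'V92'       # 9.6: pg_stat_activity has a 'query' column
assert backend_usage_variant(110005) == 'V10'      # 10/11: filter on 'backend_type' instead
assert backend_usage_variant(120001) == 'DEFAULT'  # 12+: simplest form of the query
```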
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index 3dd461408..1970a7a27 100644
--- a/collectors/python.d.plugin/postgres/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -81,7 +81,7 @@
# sslkey : path/to/key # the location of the client key file
#
# SSL connection parameters description: https://www.postgresql.org/docs/current/libpq-ssl.html
-#
+#
# Additionally, the following options allow selective disabling of charts
#
# table_stats : false
@@ -93,6 +93,10 @@
# a postgres user for netdata and add its password below to allow
# netdata connect.
#
+# Please note that when running Postgres inside a container, the
+# client (Netdata) is not considered local unless it runs inside
+# the same container.
+#
# Postgres supported versions are :
# - 9.3 (without autovacuum)
# - 9.4
@@ -116,6 +120,7 @@ tcp:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : 'localhost'
port : 5432
@@ -123,6 +128,7 @@ tcpipv4:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : '127.0.0.1'
port : 5432
@@ -130,5 +136,6 @@ tcpipv6:
name : 'local'
database : 'postgres'
user : 'postgres'
+ password : 'postgres'
host : '::1'
port : 5432
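Shipping an explicit `password : 'postgres'` in the stock TCP jobs lines up with the new container note above: a client outside the server's container is not local, so trust/peer authentication does not apply. Roughly, in `psycopg2` terms (a sketch of the connection only; the collector assembles these parameters from the job config):

```python
import psycopg2  # from the python-psycopg2 package listed in the requirements

conn = psycopg2.connect(
    dbname='postgres',
    user='postgres',
    password='postgres',  # required once the client is not considered local
    host='localhost',
    port=5432,
)
conn.close()
```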
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
index ac6e12f3a..610a665de 100644
--- a/collectors/python.d.plugin/powerdns/README.md
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -1,6 +1,12 @@
-# powerdns
+<!--
+title: "PowerDNS monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/powerdns/README.md
+sidebar_label: "PowerDNS"
+-->
-Module monitor powerdns performance and health metrics.
+# PowerDNS monitoring with Netdata
+
+Monitors authoritative server and recursor statistics.
PowerDNS charts:
@@ -75,7 +81,15 @@ Powerdns charts:
- packetcache-entries
- negcache-entries
-## configuration
+## Configuration
+
+Edit the `python.d/powerdns.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/powerdns.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
index bcf9f0d2d..b951e0c1a 100644
--- a/collectors/python.d.plugin/powerdns/powerdns.chart.py
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -8,7 +8,6 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'questions',
'cache_usage',
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 23a67751a..f1b369a44 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -1,6 +1,21 @@
-# proxysql
+<!--
+title: "ProxySQL monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/proxysql/README.md
+sidebar_label: "ProxySQL"
+-->
-This module monitors proxysql backend and frontend performance metrics.
+# ProxySQL monitoring with Netdata
+
+Monitors database backend and frontend performance metrics.
+
+## Requirements
+
+- the Python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+- a local `netdata` user to connect to the ProxySQL server.
+
+To create the `netdata` user, follow [the documentation](https://github.com/sysown/proxysql/wiki/Users-configuration#creating-a-new-user).
+
+## Charts
It produces:
@@ -16,7 +31,7 @@ It produces:
- questions: total number of queries sent from frontends
- slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
-3. **Overall Bandwith (backends)**
+3. **Overall Bandwidth (backends)**
- in
- out
@@ -30,7 +45,7 @@ It produces:
- `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
- `-1`: Unknown status
-5. **Bandwith (backends)**
+5. **Bandwidth (backends)**
- Backends
- in
@@ -65,7 +80,15 @@ It produces:
- Commands
- 100us, 500us, ..., 10s, inf: the total number of commands of the given type which executed within the specified time limit and the previous one.
-## configuration
+## Configuration
+
+Edit the `python.d/proxysql.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/proxysql.conf
+```
```yaml
tcpipv4:
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
index c97147486..982c28ee7 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.chart.py
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -189,7 +189,8 @@ CHARTS = {
'lines': []
},
'commands_duration': {
- 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration',
+ 'line'],
'lines': []
}
}
@@ -289,7 +290,7 @@ class Service(MySQLService):
@staticmethod
def histogram_chart(cmd):
return [
- 'commands_historgram_' + cmd['name'],
+ 'commands_histogram_' + cmd['name'],
None,
'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
'commands',
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index 295db0140..9b7c0a2c3 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -1,4 +1,10 @@
-# puppet
+<!--
+title: "Puppet monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md
+sidebar_label: "Puppet"
+-->
+
+# Puppet monitoring with Netdata
Monitors the status of Puppet Server and PuppetDB.
@@ -24,7 +30,15 @@ Following charts are drawn:
- max
- used
-## configuration
+## Configuration
+
+Edit the `python.d/puppet.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/puppet.conf
+```
```yaml
puppetdb:
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
index 30e219da4..f8adf6006 100644
--- a/collectors/python.d.plugin/puppet/puppet.chart.py
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -12,14 +12,12 @@
#
import socket
-
from json import loads
from bases.FrameworkServices.UrlService import UrlService
update_every = 5
-
MiB = 1 << 20
CPU_SCALE = 1000
@@ -83,7 +81,7 @@ class Service(UrlService):
# NOTE: there are several ways to retrieve data
# 1. Only PE versions:
# https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
- # 2. Inidividual Metrics API (JMX):
+ # 2. Individual Metrics API (JMX):
# https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
# 3. Extended status at debug level:
# https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
@@ -108,8 +106,8 @@ class Service(UrlService):
non_heap_mem = jvm_metrics['non-heap-memory']
for k in ['max', 'committed', 'used', 'init']:
- data['jvm_heap_'+k] = heap_mem[k]
- data['jvm_nonheap_'+k] = non_heap_mem[k]
+ data['jvm_heap_' + k] = heap_mem[k]
+ data['jvm_nonheap_' + k] = non_heap_mem[k]
fd_open = jvm_metrics['file-descriptors']
data['fd_max'] = fd_open['max']
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
index 08d59c4d3..61cfd6093 100644
--- a/collectors/python.d.plugin/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -29,7 +29,9 @@ gc_interval: 300
# apache_cache has been replaced by web_log
# adaptec_raid: yes
+# alarms: yes
# am2320: yes
+# anomalies: no
apache_cache: no
# beanstalk: yes
# bind_rndc: yes
@@ -58,6 +60,7 @@ gunicorn_log: no
# haproxy: yes
# hddtemp: yes
# httpcheck: yes
+hpssa: no
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
deleted file mode 100644
index 7b27acdd5..000000000
--- a/collectors/python.d.plugin/python.d.plugin
+++ /dev/null
@@ -1,784 +0,0 @@
-#!/usr/bin/env bash
-'''':;
-pybinary=$(which python || which python3 || which python2)
-filtered=()
-for arg in "$@"
-do
- case $arg in
- -p*) pybinary=${arg:2}
- shift 1 ;;
- *) filtered+=("$arg") ;;
- esac
-done
-if [ "$pybinary" = "" ]
-then
- echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
- exit 1
-fi
-exec "$pybinary" "$0" "${filtered[@]}" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import collections
-import copy
-import gc
-import json
-import os
-import pprint
-import re
-import sys
-import time
-import threading
-import types
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-PY_VERSION = sys.version_info[:2] # (major=3, minor=7, micro=3, releaselevel='final', serial=0)
-
-
-if PY_VERSION > (3, 1):
- from importlib.machinery import SourceFileLoader
-else:
- from imp import load_source as SourceFileLoader
-
-
-ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
-ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
-ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
-ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
-ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
-
-
-def add_pythond_packages():
- pluginsd = os.getenv(ENV_NETDATA_PLUGINS_DIR, os.path.dirname(__file__))
- pythond = os.path.abspath(pluginsd + '/../python.d')
- packages = os.path.join(pythond, 'python_modules')
- sys.path.append(packages)
-
-
-add_pythond_packages()
-
-
-from bases.collection import safe_print
-from bases.loggers import PythonDLogger
-from bases.loaders import load_config
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-
-def dirs():
- var_lib = os.getenv(
- ENV_NETDATA_LIB_DIR,
- '/var/lib/netdata',
- )
- plugin_user_config = os.getenv(
- ENV_NETDATA_USER_CONFIG_DIR,
- '/etc/netdata',
- )
- plugin_stock_config = os.getenv(
- ENV_NETDATA_STOCK_CONFIG_DIR,
- '/usr/lib/netdata/conf.d',
- )
- pluginsd = os.getenv(
- ENV_NETDATA_PLUGINS_DIR,
- os.path.dirname(__file__),
- )
- modules_user_config = os.path.join(plugin_user_config, 'python.d')
- modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
- modules = os.path.abspath(pluginsd + '/../python.d')
-
- Dirs = collections.namedtuple(
- 'Dirs',
- [
- 'plugin_user_config',
- 'plugin_stock_config',
- 'modules_user_config',
- 'modules_stock_config',
- 'modules',
- 'var_lib',
- ]
- )
- return Dirs(
- plugin_user_config,
- plugin_stock_config,
- modules_user_config,
- modules_stock_config,
- modules,
- var_lib,
- )
-
-
-DIRS = dirs()
-
-IS_ATTY = sys.stdout.isatty()
-
-MODULE_SUFFIX = '.chart.py'
-
-
-def available_modules():
- obsolete = (
- 'apache_cache', # replaced by web_log
- 'cpuidle', # rewritten in C
- 'cpufreq', # rewritten in C
- 'gunicorn_log', # replaced by web_log
- 'linux_power_supply', # rewritten in C
- 'nginx_log', # replaced by web_log
- 'mdstat', # rewritten in C
- 'sslcheck', # rewritten in Go, memory leak bug https://github.com/netdata/netdata/issues/5624
- 'unbound', # rewritten in Go
- )
-
- files = sorted(os.listdir(DIRS.modules))
- modules = [m[:-len(MODULE_SUFFIX)] for m in files if m.endswith(MODULE_SUFFIX)]
- avail = [m for m in modules if m not in obsolete]
- return tuple(avail)
-
-
-AVAILABLE_MODULES = available_modules()
-
-JOB_BASE_CONF = {
- 'update_every': int(os.getenv(ENV_NETDATA_UPDATE_EVERY, 1)),
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'penalty': True,
- 'name': str(),
-}
-
-PLUGIN_BASE_CONF = {
- 'enabled': True,
- 'default_run': True,
- 'gc_run': True,
- 'gc_interval': 300,
-}
-
-
-def multi_path_find(name, *paths):
- for path in paths:
- abs_name = os.path.join(path, name)
- if os.path.isfile(abs_name):
- return abs_name
- return str()
-
-
-def load_module(name):
- abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
- module = SourceFileLoader(name, abs_path)
- if isinstance(module, types.ModuleType):
- return module
- return module.load_module()
-
-
-class ModuleConfig:
- def __init__(self, name, config=None):
- self.name = name
- self.config = config or OrderedDict()
-
- def load(self, abs_path):
- self.config.update(load_config(abs_path) or dict())
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.config[k]) for k in keys if k in self.config)
-
- def create_job(self, job_name, job_config=None):
- job_config = job_config or dict()
-
- config = OrderedDict()
- config.update(job_config)
- config['job_name'] = job_name
- for k, v in self.defaults().items():
- config.setdefault(k, v)
-
- return config
-
- def job_names(self):
- return [v for v in self.config if isinstance(self.config.get(v), dict)]
-
- def single_job(self):
- return [self.create_job(self.name, self.config)]
-
- def multi_job(self):
- return [self.create_job(n, self.config[n]) for n in self.job_names()]
-
- def create_jobs(self):
- return self.multi_job() or self.single_job()
-
-
-class JobsConfigsBuilder:
- def __init__(self, config_dirs):
- self.config_dirs = config_dirs
- self.log = PythonDLogger()
- self.job_defaults = None
- self.module_defaults = None
- self.min_update_every = None
-
- def load_module_config(self, module_name):
- name = '{0}.conf'.format(module_name)
- self.log.debug("[{0}] looking for '{1}' in {2}".format(module_name, name, self.config_dirs))
- config = ModuleConfig(module_name)
-
- abs_path = multi_path_find(name, *self.config_dirs)
- if not abs_path:
- self.log.warning("[{0}] '{1}' was not found".format(module_name, name))
- return config
-
- self.log.debug("[{0}] loading '{1}'".format(module_name, abs_path))
- try:
- config.load(abs_path)
- except Exception as error:
- self.log.error("[{0}] error on loading '{1}' : {2}".format(module_name, abs_path, repr(error)))
- return None
-
- self.log.debug("[{0}] '{1}' is loaded".format(module_name, abs_path))
- return config
-
- @staticmethod
- def apply_defaults(jobs, defaults):
- if defaults is None:
- return
- for k, v in defaults.items():
- for job in jobs:
- job.setdefault(k, v)
-
- def set_min_update_every(self, jobs, min_update_every):
- if min_update_every is None:
- return
- for job in jobs:
- if 'update_every' in job and job['update_every'] < self.min_update_every:
- job['update_every'] = self.min_update_every
-
- def build(self, module_name):
- config = self.load_module_config(module_name)
- if config is None:
- return None
-
- configs = config.create_jobs()
- self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
-
- self.apply_defaults(configs, self.module_defaults)
- self.apply_defaults(configs, self.job_defaults)
- self.set_min_update_every(configs, self.min_update_every)
-
- return configs
-
-
-JOB_STATUS_ACTIVE = 'active'
-JOB_STATUS_RECOVERING = 'recovering'
-JOB_STATUS_DROPPED = 'dropped'
-JOB_STATUS_INIT = 'initial'
-
-
-class Job(threading.Thread):
- inf = -1
-
- def __init__(self, service, module_name, config):
- threading.Thread.__init__(self)
- self.daemon = True
- self.service = service
- self.module_name = module_name
- self.config = config
- self.real_name = config['job_name']
- self.actual_name = config['override_name'] or self.real_name
- self.autodetection_retry = config['autodetection_retry']
- self.checks = self.inf
- self.job = None
- self.status = JOB_STATUS_INIT
-
- def is_inited(self):
- return self.job is not None
-
- def init(self):
- self.job = self.service(configuration=copy.deepcopy(self.config))
-
- def check(self):
- ok = self.job.check()
- self.checks -= self.checks != self.inf and not ok
- return ok
-
- def create(self):
- self.job.create()
-
- def need_to_recheck(self):
- return self.autodetection_retry != 0 and self.checks != 0
-
- def run(self):
- self.job.run()
-
-
-class ModuleSrc:
- def __init__(self, name):
- self.name = name
- self.src = None
-
- def load(self):
- self.src = load_module(self.name)
-
- def get(self, key):
- return getattr(self.src, key, None)
-
- def service(self):
- return self.get('Service')
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.get(k)) for k in keys if self.get(k) is not None)
-
- def is_disabled_by_default(self):
- return bool(self.get('disabled_by_default'))
-
-
-class JobsStatuses:
- def __init__(self):
- self.items = OrderedDict()
-
- def dump(self):
- return json.dumps(self.items, indent=2)
-
- def get(self, module_name, job_name):
- if module_name not in self.items:
- return None
- return self.items[module_name].get(job_name)
-
- def has(self, module_name, job_name):
- return self.get(module_name, job_name) is not None
-
- def from_file(self, path):
- with open(path) as f:
- data = json.load(f)
- return self.from_json(data)
-
- @staticmethod
- def from_json(items):
- if not isinstance(items, dict):
- raise Exception('items obj has wrong type : {0}'.format(type(items)))
- if not items:
- return JobsStatuses()
-
- v = OrderedDict()
- for mod_name in sorted(items):
- if not items[mod_name]:
- continue
- v[mod_name] = OrderedDict()
- for job_name in sorted(items[mod_name]):
- v[mod_name][job_name] = items[mod_name][job_name]
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
- @staticmethod
- def from_jobs(jobs):
- v = OrderedDict()
- for job in jobs:
- status = job.status
- if status not in (JOB_STATUS_ACTIVE, JOB_STATUS_RECOVERING):
- continue
- if job.module_name not in v:
- v[job.module_name] = OrderedDict()
- v[job.module_name][job.real_name] = status
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
-
-class StdoutSaver:
- @staticmethod
- def save(dump):
- print(dump)
-
-
-class CachedFileSaver:
- def __init__(self, path):
- self.last_save_success = False
- self.last_saved_dump = str()
- self.path = path
-
- def save(self, dump):
- if self.last_save_success and self.last_saved_dump == dump:
- return
- try:
- with open(self.path, 'w') as out:
- out.write(dump)
- except Exception:
- self.last_save_success = False
- raise
- self.last_saved_dump = dump
- self.last_save_success = True
-
-
-class PluginConfig(dict):
- def __init__(self, *args):
- dict.__init__(self, *args)
-
- def is_module_explicitly_enabled(self, module_name):
- return self._is_module_enabled(module_name, True)
-
- def is_module_enabled(self, module_name):
- return self._is_module_enabled(module_name, False)
-
- def _is_module_enabled(self, module_name, explicit):
- if module_name in self:
- return self[module_name]
- if explicit:
- return False
- return self['default_run']
-
-
-class Plugin:
- config_name = 'python.d.conf'
- jobs_status_dump_name = 'pythond-jobs-statuses.json'
-
- def __init__(self, modules_to_run, min_update_every):
- self.modules_to_run = modules_to_run
- self.min_update_every = min_update_every
- self.config = PluginConfig(PLUGIN_BASE_CONF)
- self.log = PythonDLogger()
- self.started_jobs = collections.defaultdict(dict)
- self.jobs = list()
- self.saver = None
- self.runs = 0
-
- def load_config(self):
- paths = [
- DIRS.plugin_user_config,
- DIRS.plugin_stock_config,
- ]
- self.log.debug("looking for '{0}' in {1}".format(self.config_name, paths))
- abs_path = multi_path_find(self.config_name, *paths)
- if not abs_path:
- self.log.warning("'{0}' was not found, using defaults".format(self.config_name))
- return True
-
- self.log.debug("loading '{0}'".format(abs_path))
- try:
- config = load_config(abs_path)
- except Exception as error:
- self.log.error("error on loading '{0}' : {1}".format(abs_path, repr(error)))
- return False
-
- self.log.debug("'{0}' is loaded".format(abs_path))
- self.config.update(config)
- return True
-
- def load_job_statuses(self):
- self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib))
- abs_path = multi_path_find(self.jobs_status_dump_name, DIRS.var_lib)
- if not abs_path:
- self.log.warning("'{0}' was not found".format(self.jobs_status_dump_name))
- return
-
- self.log.debug("loading '{0}'".format(abs_path))
- try:
- statuses = JobsStatuses().from_file(abs_path)
- except Exception as error:
- self.log.warning("error on loading '{0}' : {1}".format(abs_path, repr(error)))
- return None
- self.log.debug("'{0}' is loaded".format(abs_path))
- return statuses
-
- def create_jobs(self, job_statuses=None):
- paths = [
- DIRS.modules_user_config,
- DIRS.modules_stock_config,
- ]
-
- builder = JobsConfigsBuilder(paths)
- builder.job_defaults = JOB_BASE_CONF
- builder.min_update_every = self.min_update_every
-
- jobs = list()
- for mod_name in self.modules_to_run:
- if not self.config.is_module_enabled(mod_name):
- self.log.info("[{0}] is disabled in the configuration file, skipping it".format(mod_name))
- continue
-
- src = ModuleSrc(mod_name)
- try:
- src.load()
- except Exception as error:
- self.log.warning("[{0}] error on loading source : {1}, skipping it".format(mod_name, repr(error)))
- continue
-
- if not (src.service() and callable(src.service())):
- self.log.warning("[{0}] has no callable Service object, skipping it".format(mod_name))
- continue
-
- if src.is_disabled_by_default() and not self.config.is_module_explicitly_enabled(mod_name):
- self.log.info("[{0}] is disabled by default, skipping it".format(mod_name))
- continue
-
- builder.module_defaults = src.defaults()
- configs = builder.build(mod_name)
- if not configs:
- self.log.info("[{0}] has no job configs, skipping it".format(mod_name))
- continue
-
- for config in configs:
- config['job_name'] = re.sub(r'\s+', '_', config['job_name'])
- config['override_name'] = re.sub(r'\s+', '_', config.pop('name'))
-
- job = Job(src.service(), mod_name, config)
-
- was_previously_active = job_statuses and job_statuses.has(job.module_name, job.real_name)
- if was_previously_active and job.autodetection_retry == 0:
- self.log.debug('{0}[{1}] was previously active, applying recovering settings'.format(
- job.module_name, job.real_name))
- job.checks = 11
- job.autodetection_retry = 30
-
- jobs.append(job)
-
- return jobs
-
- def setup(self):
- if not self.load_config():
- return False
-
- if not self.config['enabled']:
- self.log.info('disabled in the configuration file')
- return False
-
- statuses = self.load_job_statuses()
-
- self.jobs = self.create_jobs(statuses)
- if not self.jobs:
- self.log.info('no jobs to run')
- return False
-
- if not IS_ATTY:
- abs_path = os.path.join(DIRS.var_lib, self.jobs_status_dump_name)
- self.saver = CachedFileSaver(abs_path)
- return True
-
- def start_jobs(self, *jobs):
- for job in jobs:
- if job.status not in (JOB_STATUS_INIT, JOB_STATUS_RECOVERING):
- continue
-
- if job.actual_name in self.started_jobs[job.module_name]:
- self.log.info('{0}[{1}] : already served by another job, skipping it'.format(
- job.module_name, job.real_name))
- job.status = JOB_STATUS_DROPPED
- continue
-
- if not job.is_inited():
- try:
- job.init()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
-
- try:
- ok = job.check()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
- if not ok:
- self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
- job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
- continue
- self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
-
- try:
- job.create()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
-
- self.started_jobs[job.module_name] = job.actual_name
- job.status = JOB_STATUS_ACTIVE
- job.start()
-
- @staticmethod
- def keep_alive():
- if not IS_ATTY:
- safe_print('\n')
-
- def garbage_collection(self):
- if self.config['gc_run'] and self.runs % self.config['gc_interval'] == 0:
- v = gc.collect()
- self.log.debug('GC collection run result: {0}'.format(v))
-
- def restart_recovering_jobs(self):
- for job in self.jobs:
- if job.status != JOB_STATUS_RECOVERING:
- continue
- if self.runs % job.autodetection_retry != 0:
- continue
- self.start_jobs(job)
-
- def cleanup_jobs(self):
- self.jobs = [j for j in self.jobs if j.status != JOB_STATUS_DROPPED]
-
- def have_alive_jobs(self):
- return next(
- (True for job in self.jobs if job.status in (JOB_STATUS_RECOVERING, JOB_STATUS_ACTIVE)),
- False,
- )
-
- def save_job_statuses(self):
- if self.saver is None:
- return
- if self.runs % 10 != 0:
- return
- dump = JobsStatuses().from_jobs(self.jobs).dump()
- try:
- self.saver.save(dump)
- except Exception as error:
- self.log.error("error on saving jobs statuses dump : {0}".format(repr(error)))
-
- def serve_once(self):
- if not self.have_alive_jobs():
- self.log.info('no jobs to serve')
- return False
-
- time.sleep(1)
- self.runs += 1
-
- self.keep_alive()
- self.garbage_collection()
- self.cleanup_jobs()
- self.restart_recovering_jobs()
- self.save_job_statuses()
- return True
-
- def serve(self):
- while self.serve_once():
- pass
-
- def run(self):
- self.start_jobs(*self.jobs)
- self.serve()
-
-
-def parse_command_line():
- opts = sys.argv[:][1:]
-
- debug = False
- trace = False
- update_every = 1
- modules_to_run = list()
-
- def find_first_positive_int(values):
- return next((v for v in values if v.isdigit() and int(v) >= 1), None)
-
- u = find_first_positive_int(opts)
- if u is not None:
- update_every = int(u)
- opts.remove(u)
- if 'debug' in opts:
- debug = True
- opts.remove('debug')
- if 'trace' in opts:
- trace = True
- opts.remove('trace')
- if opts:
- modules_to_run = list(opts)
-
- cmd = collections.namedtuple(
- 'CMD',
- [
- 'update_every',
- 'debug',
- 'trace',
- 'modules_to_run',
- ])
- return cmd(
- update_every,
- debug,
- trace,
- modules_to_run
- )
-
-
-def guess_module(modules, *names):
- def guess(n):
- found = None
- for i, _ in enumerate(n):
- cur = [x for x in modules if x.startswith(name[:i + 1])]
- if not cur:
- return found
- found = cur
- return found
-
- guessed = list()
- for name in names:
- name = name.lower()
- m = guess(name)
- if m:
- guessed.extend(m)
- return sorted(set(guessed))
-
-
-def disable():
- if not IS_ATTY:
- safe_print('DISABLE')
- exit(0)
-
-
-def main():
- cmd = parse_command_line()
- log = PythonDLogger()
-
- if cmd.debug:
- log.logger.severity = 'DEBUG'
- if cmd.trace:
- log.log_traceback = True
-
- log.info('using python v{0}'.format(PY_VERSION[0]))
-
- unknown = set(cmd.modules_to_run) - set(AVAILABLE_MODULES)
- if unknown:
- log.error('unknown modules : {0}'.format(sorted(list(unknown))))
- guessed = guess_module(AVAILABLE_MODULES, *cmd.modules_to_run)
- if guessed:
- log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
- return
-
- p = Plugin(
- cmd.modules_to_run or AVAILABLE_MODULES,
- cmd.update_every,
- )
-
- try:
- if not p.setup():
- return
- p.run()
- except KeyboardInterrupt:
- pass
- log.info('exiting from main...')
-
-
-if __name__ == "__main__":
- main()
- disable()
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
index 44b6671cb..9d575d86f 100644
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -42,18 +42,17 @@ except ImportError:
PY_VERSION = sys.version_info[:2] # (major=3, minor=7, micro=3, releaselevel='final', serial=0)
-
if PY_VERSION > (3, 1):
from importlib.machinery import SourceFileLoader
else:
from imp import load_source as SourceFileLoader
-
ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
+ENV_NETDATA_LOCK_DIR = 'NETDATA_LOCK_DIR'
def add_pythond_packages():
@@ -65,10 +64,10 @@ def add_pythond_packages():
add_pythond_packages()
-
from bases.collection import safe_print
from bases.loggers import PythonDLogger
from bases.loaders import load_config
+from third_party import filelock
try:
from collections import OrderedDict
@@ -93,6 +92,10 @@ def dirs():
ENV_NETDATA_PLUGINS_DIR,
os.path.dirname(__file__),
)
+ locks = os.getenv(
+ ENV_NETDATA_LOCK_DIR,
+ os.path.join('@varlibdir_POST@', 'lock')
+ )
modules_user_config = os.path.join(plugin_user_config, 'python.d')
modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
modules = os.path.abspath(pluginsd + '/../python.d')
@@ -106,6 +109,7 @@ def dirs():
'modules_stock_config',
'modules',
'var_lib',
+ 'locks',
]
)
return Dirs(
@@ -115,6 +119,7 @@ def dirs():
modules_stock_config,
modules,
var_lib,
+ locks,
)
@@ -173,7 +178,7 @@ def multi_path_find(name, *paths):
def load_module(name):
abs_path = os.path.join(DIRS.modules, '{0}{1}'.format(name, MODULE_SUFFIX))
- module = SourceFileLoader(name, abs_path)
+ module = SourceFileLoader('pythond_' + name, abs_path)
if isinstance(module, types.ModuleType):
return module
return module.load_module()
@@ -307,6 +312,9 @@ class Job(threading.Thread):
def init(self):
self.job = self.service(configuration=copy.deepcopy(self.config))
+ def full_name(self):
+ return self.job.name
+
def check(self):
ok = self.job.check()
self.checks -= self.checks != self.inf and not ok
@@ -448,15 +456,45 @@ class PluginConfig(dict):
return self['default_run']
+class FileLockRegistry:
+ def __init__(self, path):
+ self.path = path
+ self.locks = dict()
+
+ def register(self, name):
+ if name in self.locks:
+ return
+ file = os.path.join(self.path, '{0}.collector.lock'.format(name))
+ lock = filelock.FileLock(file)
+ lock.acquire(timeout=0)
+ self.locks[name] = lock
+
+ def unregister(self, name):
+ if name not in self.locks:
+ return
+ lock = self.locks[name]
+ lock.release()
+ del self.locks[name]
+
+
+class DummyRegistry:
+ def register(self, name):
+ pass
+
+ def unregister(self, name):
+ pass
+
+
class Plugin:
config_name = 'python.d.conf'
jobs_status_dump_name = 'pythond-jobs-statuses.json'
- def __init__(self, modules_to_run, min_update_every):
+ def __init__(self, modules_to_run, min_update_every, registry):
self.modules_to_run = modules_to_run
self.min_update_every = min_update_every
self.config = PluginConfig(PLUGIN_BASE_CONF)
self.log = PythonDLogger()
+ self.registry = registry
self.started_jobs = collections.defaultdict(dict)
self.jobs = list()
self.saver = None
@@ -590,7 +628,7 @@ class Plugin:
job.init()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
continue
@@ -598,7 +636,7 @@ class Plugin:
ok = job.check()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
continue
if not ok:
@@ -608,11 +646,29 @@ class Plugin:
self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
try:
+ self.registry.register(job.full_name())
+ except filelock.Timeout as error:
+ self.log.info('{0}[{1}] : already registered by another process, skipping the job ({2})'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ except Exception as error:
+ self.log.warning('{0}[{1}] : registration failed: {2}, skipping the job'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
job.create()
except Exception as error:
self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
+ try:
+ self.registry.unregister(job.full_name())
+ except Exception as error:
+ self.log.warning('{0}[{1}] : deregistration failed: {2}'.format(
+ job.module_name, job.real_name, error))
continue
self.started_jobs[job.module_name] = job.actual_name
@@ -686,6 +742,7 @@ def parse_command_line():
debug = False
trace = False
+ nolock = False
update_every = 1
modules_to_run = list()
@@ -702,6 +759,9 @@ def parse_command_line():
if 'trace' in opts:
trace = True
opts.remove('trace')
+ if 'nolock' in opts:
+ nolock = True
+ opts.remove('nolock')
if opts:
modules_to_run = list(opts)
@@ -711,13 +771,15 @@ def parse_command_line():
'update_every',
'debug',
'trace',
+ 'nolock',
'modules_to_run',
])
return cmd(
update_every,
debug,
trace,
- modules_to_run
+ nolock,
+ modules_to_run,
)
@@ -765,9 +827,15 @@ def main():
log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
return
+ if DIRS.locks and not cmd.nolock:
+ registry = FileLockRegistry(DIRS.locks)
+ else:
+ registry = DummyRegistry()
+
p = Plugin(
cmd.modules_to_run or AVAILABLE_MODULES,
cmd.update_every,
+ registry,
)
try:
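The two registry classes above make job de-duplication pluggable: `FileLockRegistry` backs registrations with on-disk locks, while `DummyRegistry` is the no-op used when locking is off. A small self-contained sketch of the selection logic, using the same class names as the hunk above (`FileLockRegistry` is injected as a callback only so the sketch runs standalone):

```python
import os

class DummyRegistry(object):
    """No-op registry used with the new 'nolock' option or when no lock dir is set."""

    def register(self, name):
        pass

    def unregister(self, name):
        pass

def make_registry(lock_dir, nolock, file_lock_registry=None):
    # file_lock_registry would be FileLockRegistry from the hunk above; it is
    # parameterized here only to keep this sketch runnable on its own.
    if lock_dir and not nolock and file_lock_registry is not None:
        return file_lock_registry(lock_dir)
    return DummyRegistry()

registry = make_registry(os.getenv('NETDATA_LOCK_DIR'), nolock=False)
registry.register('postgres')    # hypothetical job name
registry.unregister('postgres')  # released again when the job is dropped
```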
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
index f63cb7c2f..dea50eea0 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -22,12 +22,14 @@ class ExecutableService(SimpleService):
Get raw data from executed command
:return: <list>
"""
+ command = command or self.command
+ self.debug("Executing command '{0}'".format(' '.join(command)))
try:
- p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
+ p = Popen(command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
- error=error))
+ self.error('Executing command {0} resulted in error: {1}'.format(command, error))
return None
+
data = list()
std = p.stderr if stderr else p.stdout
for line in std:
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 354d09ad8..7f5c7d221 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -51,11 +51,7 @@ class MySQLService(SimpleService):
properties['host'] = conf['host']
properties['port'] = int(conf.get('port', 3306))
elif conf.get('my.cnf'):
- if MySQLdb.__name__ == 'pymysql':
- # TODO: this is probablt wrong, it depends on version
- self.error('"my.cnf" parsing is not working for pymysql')
- else:
- properties['read_default_file'] = conf['my.cnf']
+ properties['read_default_file'] = conf['my.cnf']
if conf.get('ssl'):
properties['ssl'] = conf['ssl']
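The dropped branch used to refuse `my.cnf` under pymysql outright; reasonably recent pymysql releases honor `read_default_file`, so the option now passes through unconditionally. A condensed, self-contained sketch of the resulting property building (the config dict shape is an assumption for illustration):

```python
def build_properties(conf):
    properties = dict()
    if conf.get('host'):
        properties['host'] = conf['host']
        properties['port'] = int(conf.get('port', 3306))
    elif conf.get('my.cnf'):
        # read_default_file is honored by MySQLdb and by reasonably recent
        # pymysql releases, hence the old pymysql special case is gone
        properties['read_default_file'] = conf['my.cnf']
    if conf.get('ssl'):
        properties['ssl'] = conf['ssl']
    return properties

print(build_properties({'my.cnf': '/etc/mysql/netdata.cnf'}))  # hypothetical path
```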
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index 4dfd226b0..c304ccec2 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -55,11 +55,18 @@ class RuntimeCounters:
self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
+def clean_module_name(name):
+ if name.startswith('pythond_'):
+ return name[8:]
+ return name
+
+
class SimpleService(PythonDLimitedLogger, object):
"""
Prototype of Service class.
Implemented basic functionality to run jobs by `python.d.plugin`
"""
+
def __init__(self, configuration, name=''):
"""
:param configuration: <dict>
@@ -70,7 +77,7 @@ class SimpleService(PythonDLimitedLogger, object):
self.order = list()
self.definitions = dict()
- self.module_name = self.__module__
+ self.module_name = clean_module_name(self.__module__)
self.job_name = configuration.pop('job_name')
self.override_name = configuration.pop('override_name')
self.fake_name = None
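This pairs with the `load_module` change earlier in the diff: collectors are now registered under a `pythond_` prefix, presumably to keep collector files from colliding with identically named installed modules, and `clean_module_name` strips the prefix back off for display. A tiny illustration:

```python
PREFIX = 'pythond_'

def clean_module_name(name):
    # load_module() registers each collector as 'pythond_<name>'; strip the
    # prefix again so logs and chart identifiers show the plain name
    return name[len(PREFIX):] if name.startswith(PREFIX) else name

assert clean_module_name('pythond_redis') == 'redis'
assert clean_module_name('redis') == 'redis'
```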
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index 337bf57d8..bef3792da 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -247,7 +247,7 @@ class SocketService(SimpleService):
if self._check_raw_data(data):
break
- self.debug('final response: {0}'.format(data))
+ self.debug(u'final response: {0}'.format(data))
return data
def _get_raw_data(self, raw=False, request=None):
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index cfc7899e5..1faf036a4 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -47,6 +47,7 @@ class UrlService(SimpleService):
self.proxy_url = self.configuration.get('proxy_url')
self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
+ self.body = self.configuration.get('body')
self.request_timeout = self.configuration.get('timeout', 1)
self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
self.tls_verify = self.configuration.get('tls_verify')
@@ -119,15 +120,17 @@ class UrlService(SimpleService):
:return: str
"""
try:
- status, data = self._get_raw_data_with_status(url, manager, **kwargs)
+ response = self._do_request(url, manager, **kwargs)
except Exception as error:
self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
return None
- if status == 200:
- return data
+ if response.status == 200:
+ if isinstance(response.data, str):
+ return response.data
+ return response.data.decode(errors='ignore')
else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=response.status))
return None
def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
@@ -135,12 +138,26 @@ class UrlService(SimpleService):
Get status and response body content from http request. Does not catch exceptions
:return: int, str
"""
+ response = self._do_request(url, manager, retries, redirect, **kwargs)
+
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode(errors='ignore')
+
+ def _do_request(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get response from http request. Does not catch exceptions
+ :return: HTTPResponse
+ """
url = url or self.url
manager = manager or self._manager
retry = urllib3.Retry(retries)
if hasattr(retry, 'respect_retry_after_header'):
retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+ if self.body:
+ kwargs['body'] = self.body
+
response = manager.request(
method=self.method,
url=url,
@@ -150,9 +167,7 @@ class UrlService(SimpleService):
redirect=redirect,
**kwargs
)
- if isinstance(response.data, str):
- return response.status, response.data
- return response.status, response.data.decode()
+ return response
def check(self):
"""
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 6e78ed6e7..93be43d14 100644
--- a/collectors/python.d.plugin/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -16,8 +16,7 @@ CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
"{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
-
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
DIMENSION_SET = "SET '{id}' = {value}\n"
@@ -40,13 +39,17 @@ def create_runtime_chart(func):
:param func: class method
:return:
"""
+
def wrapper(*args, **kwargs):
self = args[0]
+ chart = RUNTIME_CHART_CREATE.format(
+ job_name=self.name,
+ update_every=self._runtime_counters.update_every,
+ )
+ safe_print(chart)
ok = func(*args, **kwargs)
- if ok:
- safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
- update_every=self._runtime_counters.update_every))
return ok
+
return wrapper
@@ -72,6 +75,7 @@ class Charts:
All charts stored in a dict.
Chart is a instance of Chart class.
Charts adding must be done using Charts.add_chart() method only"""
+
def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
"""
:param job_name: <bound method>
@@ -138,6 +142,7 @@ class Charts:
class Chart:
"""Represent a chart"""
+
def __init__(self, params):
"""
:param params: <list>
@@ -281,6 +286,7 @@ class Chart:
class Dimension:
"""Represent a dimension"""
+
def __init__(self, params):
"""
:param params: <list>
@@ -346,6 +352,7 @@ class Dimension:
class ChartVariable:
"""Represent a chart variable"""
+
def __init__(self, params):
"""
:param params: <list>
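One behavioral change here is easy to miss: the runtime chart definition is now printed before `check()` executes rather than only after a successful check, so the chart exists even for jobs that fail their first check. A self-contained sketch of the reordered decorator (the format string and counters are simplified stand-ins, not the module's real ones):

```python
RUNTIME_CHART_CREATE = 'CHART netdata.runtime_{job_name} ... {update_every}\n'  # simplified

def create_runtime_chart(func):
    def wrapper(*args, **kwargs):
        self = args[0]
        # the chart definition goes out first, unconditionally ...
        print(RUNTIME_CHART_CREATE.format(
            job_name=self.name,
            update_every=self.update_every,
        ))
        # ... and only then is the wrapped check() executed
        return func(*args, **kwargs)
    return wrapper

class FakeJob(object):
    name = 'example'
    update_every = 1

    @create_runtime_chart
    def check(self):
        return True

print(FakeJob().check())
```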
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
index 4c25aafd5..93bf8cf05 100644
--- a/collectors/python.d.plugin/python_modules/bases/collection.py
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -5,6 +5,8 @@
import os
+from threading import Lock
+
PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
CHART_BEGIN = 'BEGIN {0} {1}\n'
@@ -12,6 +14,8 @@ CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
DIMENSION_SET = "SET '{0}' = {1}\n"
+print_lock = Lock()
+
def setdefault_values(config, base_dict):
for key, value in base_dict.items():
@@ -23,10 +27,11 @@ def run_and_exit(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
exit(1)
+
return wrapper
-def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
+def on_try_except_finally(on_except=(None,), on_finally=(None,)):
except_func = on_except[0]
finally_func = on_finally[0]
@@ -40,7 +45,9 @@ def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
finally:
if finally_func:
finally_func(*on_finally[1:])
+
return wrapper
+
return decorator
@@ -49,6 +56,7 @@ def static_vars(**kwargs):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
+
return decorate
@@ -58,7 +66,9 @@ def safe_print(*msg):
:param msg:
:return:
"""
+ print_lock.acquire()
print(''.join(msg))
+ print_lock.release()
def find_binary(binary):
@@ -67,7 +77,7 @@ def find_binary(binary):
:return:
"""
for directory in PATH:
- binary_name = '/'.join([directory, binary])
+ binary_name = os.path.join(directory, binary)
if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
return binary_name
return None
@@ -82,3 +92,26 @@ def read_last_line(f):
break
result = opened.readline()
return result.decode()
+
+
+def unicode_str(arg):
+ """Return the argument as a unicode string.
+
+ The `unicode` function has been removed from Python3 and `str` takes its
+ place. This function is a helper which will try using Python 2's `unicode`
+ and if it doesn't exist, assume we're using Python 3 and use `str`.
+
+ :param arg:
+ :return: <str>
+ """
+ # TODO: fix
+ try:
+ # https://github.com/netdata/netdata/issues/7613
+ if isinstance(arg, unicode):
+ return arg
+ return unicode(arg, errors='ignore')
+ # https://github.com/netdata/netdata/issues/7642
+ except TypeError:
+ return unicode(arg)
+ except NameError:
+ return str(arg)
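`safe_print` is now guarded by a module-level lock because jobs print from multiple threads, and interleaved writes would garble the line-oriented plugin protocol (BEGIN/SET/END) that netdata parses from stdout. A self-contained sketch of the idea:

```python
import threading

print_lock = threading.Lock()

def safe_print(*msg):
    # one writer at a time: netdata reads the plugin's stdout line by line,
    # so two job threads printing at once would corrupt the stream
    with print_lock:
        print(''.join(msg))

threads = [
    threading.Thread(target=safe_print, args=('SET ', "'metric_{0}'".format(i), ' = 1'))
    for i in range(4)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```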
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 9bf2e086b..47f196a6d 100644
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -13,7 +13,7 @@ try:
except ImportError:
from time import time
-from bases.collection import on_try_except_finally
+from bases.collection import on_try_except_finally, unicode_str
LOGGING_LEVELS = {'CRITICAL': 50,
@@ -121,23 +121,23 @@ class BaseLogger(object):
self.logger.setLevel(LOGGING_LEVELS[level])
def debug(self, *msg, **kwargs):
- self.logger.debug(' '.join(map(str, msg)), **kwargs)
+ self.logger.debug(' '.join(map(unicode_str, msg)), **kwargs)
def info(self, *msg, **kwargs):
- self.logger.info(' '.join(map(str, msg)), **kwargs)
+ self.logger.info(' '.join(map(unicode_str, msg)), **kwargs)
def warning(self, *msg, **kwargs):
- self.logger.warning(' '.join(map(str, msg)), **kwargs)
+ self.logger.warning(' '.join(map(unicode_str, msg)), **kwargs)
def error(self, *msg, **kwargs):
- self.logger.error(' '.join(map(str, msg)), **kwargs)
+ self.logger.error(' '.join(map(unicode_str, msg)), **kwargs)
def alert(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
@on_try_except_finally(on_finally=(exit, 1))
def fatal(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(str, msg)), **kwargs)
+ self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
class PythonDLogger(object):
diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py
new file mode 100644
index 000000000..4c981672b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/filelock.py
@@ -0,0 +1,451 @@
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# For more information, please refer to <http://unlicense.org>
+
+"""
+A platform independent file lock that supports the with-statement.
+"""
+
+
+# Modules
+# ------------------------------------------------
+import logging
+import os
+import threading
+import time
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+try:
+ import msvcrt
+except ImportError:
+ msvcrt = None
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+
+# Backward compatibility
+# ------------------------------------------------
+try:
+ TimeoutError
+except NameError:
+ TimeoutError = OSError
+
+
+# Data
+# ------------------------------------------------
+__all__ = [
+ "Timeout",
+ "BaseFileLock",
+ "WindowsFileLock",
+ "UnixFileLock",
+ "SoftFileLock",
+ "FileLock"
+]
+
+__version__ = "3.0.12"
+
+
+_logger = None
+def logger():
+ """Returns the logger instance used in this module."""
+ global _logger
+ _logger = _logger or logging.getLogger(__name__)
+ return _logger
+
+
+# Exceptions
+# ------------------------------------------------
+class Timeout(TimeoutError):
+ """
+ Raised when the lock could not be acquired in *timeout*
+ seconds.
+ """
+
+ def __init__(self, lock_file):
+ """
+ """
+ #: The path of the file lock.
+ self.lock_file = lock_file
+ return None
+
+ def __str__(self):
+ temp = "The file lock '{}' could not be acquired."\
+ .format(self.lock_file)
+ return temp
+
+
+# Classes
+# ------------------------------------------------
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
+# and wraps the lock to make sure __enter__ is not called twice when entering
+# the with statement.
+# If we would simply return *self*, the lock would be acquired again
+# in the *__enter__* method of the BaseFileLock, but not released again
+# automatically.
+#
+# :seealso: issue #37 (memory leak)
+class _Acquire_ReturnProxy(object):
+
+ def __init__(self, lock):
+ self.lock = lock
+ return None
+
+ def __enter__(self):
+ return self.lock
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.lock.release()
+ return None
+
+
+class BaseFileLock(object):
+ """
+ Implements the base class of a file lock.
+ """
+
+ def __init__(self, lock_file, timeout = -1):
+ """
+ """
+ # The path to the lock file.
+ self._lock_file = lock_file
+
+ # The file descriptor for the *_lock_file* as it is returned by the
+ # os.open() function.
+ # This file lock is only NOT None, if the object currently holds the
+ # lock.
+ self._lock_file_fd = None
+
+ # The default timeout value.
+ self.timeout = timeout
+
+ # We use this lock primarily for the lock counter.
+ self._thread_lock = threading.Lock()
+
+ # The lock counter is used for implementing the nested locking
+ # mechanism. Whenever the lock is acquired, the counter is increased and
+ # the lock is only released, when this value is 0 again.
+ self._lock_counter = 0
+ return None
+
+ @property
+ def lock_file(self):
+ """
+ The path to the lock file.
+ """
+ return self._lock_file
+
+ @property
+ def timeout(self):
+ """
+ You can set a default timeout for the filelock. It will be used as
+ fallback value in the acquire method, if no timeout value (*None*) is
+ given.
+
+ If you want to disable the timeout, set it to a negative value.
+
+ A timeout of 0 means, that there is exactly one attempt to acquire the
+ file lock.
+
+ .. versionadded:: 2.0.0
+ """
+ return self._timeout
+
+ @timeout.setter
+ def timeout(self, value):
+ """
+ """
+ self._timeout = float(value)
+ return None
+
+ # Platform dependent locking
+ # --------------------------------------------
+
+ def _acquire(self):
+ """
+ Platform dependent. If the file lock could be
+ acquired, self._lock_file_fd holds the file descriptor
+ of the lock file.
+ """
+ raise NotImplementedError()
+
+ def _release(self):
+ """
+ Releases the lock and sets self._lock_file_fd to None.
+ """
+ raise NotImplementedError()
+
+ # Platform independent methods
+ # --------------------------------------------
+
+ @property
+ def is_locked(self):
+ """
+ True, if the object holds the file lock.
+
+ .. versionchanged:: 2.0.0
+
+ This was previously a method and is now a property.
+ """
+ return self._lock_file_fd is not None
+
+ def acquire(self, timeout=None, poll_intervall=0.05):
+ """
+ Acquires the file lock or fails with a :exc:`Timeout` error.
+
+ .. code-block:: python
+
+ # You can use this method in the context manager (recommended)
+ with lock.acquire():
+ pass
+
+ # Or use an equivalent try-finally construct:
+ lock.acquire()
+ try:
+ pass
+ finally:
+ lock.release()
+
+ :arg float timeout:
+ The maximum time waited for the file lock.
+ If ``timeout < 0``, there is no timeout and this method will
+ block until the lock could be acquired.
+ If ``timeout`` is None, the default :attr:`~timeout` is used.
+
+ :arg float poll_intervall:
+ We check once in *poll_intervall* seconds if we can acquire the
+ file lock.
+
+ :raises Timeout:
+ if the lock could not be acquired in *timeout* seconds.
+
+ .. versionchanged:: 2.0.0
+
+ This method returns now a *proxy* object instead of *self*,
+ so that it can be used in a with statement without side effects.
+ """
+ # Use the default timeout, if no timeout is provided.
+ if timeout is None:
+ timeout = self.timeout
+
+ # Increment the number right at the beginning.
+ # We can still undo it, if something fails.
+ with self._thread_lock:
+ self._lock_counter += 1
+
+ lock_id = id(self)
+ lock_filename = self._lock_file
+ start_time = time.time()
+ try:
+ while True:
+ with self._thread_lock:
+ if not self.is_locked:
+ logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename)
+ self._acquire()
+
+ if self.is_locked:
+ logger().info('Lock %s acquired on %s', lock_id, lock_filename)
+ break
+ elif timeout >= 0 and time.time() - start_time > timeout:
+ logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename)
+ raise Timeout(self._lock_file)
+ else:
+ logger().debug(
+ 'Lock %s not acquired on %s, waiting %s seconds ...',
+ lock_id, lock_filename, poll_intervall
+ )
+ time.sleep(poll_intervall)
+ except:
+ # Something did go wrong, so decrement the counter.
+ with self._thread_lock:
+ self._lock_counter = max(0, self._lock_counter - 1)
+
+ raise
+ return _Acquire_ReturnProxy(lock = self)
+
+ def release(self, force = False):
+ """
+ Releases the file lock.
+
+ Please note, that the lock is only completly released, if the lock
+ counter is 0.
+
+ Also note, that the lock file itself is not automatically deleted.
+
+ :arg bool force:
+ If true, the lock counter is ignored and the lock is released in
+ every case.
+ """
+ with self._thread_lock:
+
+ if self.is_locked:
+ self._lock_counter -= 1
+
+ if self._lock_counter == 0 or force:
+ lock_id = id(self)
+ lock_filename = self._lock_file
+
+ logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename)
+ self._release()
+ self._lock_counter = 0
+ logger().info('Lock %s released on %s', lock_id, lock_filename)
+
+ return None
+
+ def __enter__(self):
+ self.acquire()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.release()
+ return None
+
+ def __del__(self):
+ self.release(force = True)
+ return None
+
+
+# Windows locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+class WindowsFileLock(BaseFileLock):
+ """
+ Uses the :func:`msvcrt.locking` function to hard lock the lock file on
+ windows systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except OSError:
+ pass
+ else:
+ try:
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+ os.close(fd)
+
+ try:
+ os.remove(self._lock_file)
+ # Probably another instance of the application
+ # that acquired the file lock.
+ except OSError:
+ pass
+ return None
+
+# Unix locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~
+
+class UnixFileLock(BaseFileLock):
+ """
+ Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+ fd = os.open(self._lock_file, open_mode)
+
+ try:
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ # Do not remove the lockfile:
+ #
+ # https://github.com/benediktschmitt/py-filelock/issues/31
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ fcntl.flock(fd, fcntl.LOCK_UN)
+ os.close(fd)
+ return None
+
+# Soft lock
+# ~~~~~~~~~
+
+class SoftFileLock(BaseFileLock):
+ """
+ Simply watches the existence of the lock file.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except (IOError, OSError):
+ pass
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ os.close(self._lock_file_fd)
+ self._lock_file_fd = None
+
+ try:
+ os.remove(self._lock_file)
+ # The file is already deleted and that's what we want.
+ except OSError:
+ pass
+ return None
+
+
+# Platform filelock
+# ~~~~~~~~~~~~~~~~~
+
+#: Alias for the lock, which should be used for the current platform. On
+#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
+#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
+FileLock = None
+
+if msvcrt:
+ FileLock = WindowsFileLock
+elif fcntl:
+ FileLock = UnixFileLock
+else:
+ FileLock = SoftFileLock
+
+ if warnings is not None:
+ warnings.warn("only soft file lock is available")
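The plugin always calls this library with `timeout=0`, so acquisition makes exactly one attempt and raises `Timeout` instead of polling. A minimal usage sketch (the lock path is hypothetical, and the import assumes `python_modules` is on `sys.path`, as `python.d.plugin` arranges):

```python
from third_party import filelock

lock = filelock.FileLock('/tmp/example.collector.lock')  # hypothetical path
try:
    lock.acquire(timeout=0)  # exactly one attempt, no polling
except filelock.Timeout:
    print('lock is held by another process')
else:
    try:
        pass  # ... do exclusive work ...
    finally:
        # note: the Unix backend deliberately leaves the lock file behind
        lock.release()
```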
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
index da04bb857..4ebd556c3 100644
--- a/collectors/python.d.plugin/python_modules/third_party/monotonic.py
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -54,6 +54,41 @@ except AttributeError:
import os
import sys
import threading
+
+
+ def clock_clock_gettime_c_library():
+ return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library():
+ return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_c_library_synology6():
+ return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library_synology6():
+ return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime
+
+
+ def clock_gettime_linux():
+ # see https://github.com/netdata/netdata/issues/7976
+ order = [
+ clock_clock_gettime_c_library,
+ clock_clock_gettime_rt_library,
+ clock_clock_gettime_c_library_synology6,
+ clock_clock_gettime_rt_library_synology6,
+ ]
+
+ for gettime in order:
+ try:
+ return gettime()
+ except (RuntimeError, AttributeError, OSError):
+ continue
+ raise RuntimeError('can not find c and rt libraries')
+
+
try:
if sys.platform == 'darwin': # OS X, iOS
# See Technical Q&A QA1398 of the Mac Developer Library:
@@ -132,12 +167,7 @@ except AttributeError:
return final_milliseconds / 1000.0
else:
- try:
- clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
- use_errno=True).clock_gettime
- except Exception:
- clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
- use_errno=True).clock_gettime
+ clock_gettime = clock_gettime_linux()
class timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 1d7ad956d..2130a7b3a 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -1,6 +1,13 @@
-# rabbitmq
+<!--
+title: "RabbitMQ monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md
+sidebar_label: "RabbitMQ"
+-->
+
+# RabbitMQ monitoring with Netdata
+
+Collects global and per-virtual-host message broker metrics.
-This module monitors [RabbitMQ](https://www.rabbitmq.com/) performance and health metrics.
Following charts are drawn:
@@ -62,7 +69,38 @@ Per Vhost charts:
- redeliver
- return_unroutable
-## configuration
+2. Per Queue charts:
+
+ 1. **Queued Messages**
+
+ - messages
+ - paged_out
+ - persistent
+ - ready
+ - unacknowledged
+
+ 2. **Queue Messages stats**
+
+ - ack
+ - confirm
+ - deliver
+ - get
+ - get_no_ack
+ - publish
+ - redeliver
+ - return_unroutable
+
+## Configuration
+
+Edit the `python.d/rabbitmq.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rabbitmq.conf
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
```yaml
socket:
@@ -73,8 +111,6 @@ socket:
pass : 'guest'
```
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)

diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index d581c14e7..866b777f7 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -9,6 +9,7 @@ from bases.FrameworkServices.UrlService import UrlService
API_NODE = 'api/nodes'
API_OVERVIEW = 'api/overview'
+API_QUEUES = 'api/queues'
API_VHOSTS = 'api/vhosts'
NODE_STATS = [
@@ -31,7 +32,30 @@ OVERVIEW_STATS = [
'message_stats.ack',
'message_stats.redeliver',
'message_stats.deliver',
- 'message_stats.publish'
+ 'message_stats.publish',
+ 'churn_rates.connection_created_details.rate',
+ 'churn_rates.connection_closed_details.rate',
+ 'churn_rates.channel_created_details.rate',
+ 'churn_rates.channel_closed_details.rate',
+ 'churn_rates.queue_created_details.rate',
+ 'churn_rates.queue_declared_details.rate',
+ 'churn_rates.queue_deleted_details.rate'
+]
+
+QUEUE_STATS = [
+ 'messages',
+ 'messages_paged_out',
+ 'messages_persistent',
+ 'messages_ready',
+ 'messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.confirm',
+ 'message_stats.deliver',
+ 'message_stats.get',
+ 'message_stats.get_no_ack',
+ 'message_stats.publish',
+ 'message_stats.redeliver',
+ 'message_stats.return_unroutable',
]
VHOST_MESSAGE_STATS = [
@@ -47,6 +71,9 @@ VHOST_MESSAGE_STATS = [
ORDER = [
'queued_messages',
+ 'connection_churn_rates',
+ 'channel_churn_rates',
+ 'queue_churn_rates',
'message_rates',
'global_counts',
'file_descriptors',
@@ -104,6 +131,28 @@ CHARTS = {
['object_totals_exchanges', 'exchanges', 'absolute']
]
},
+ 'connection_churn_rates': {
+ 'options': [None, 'Connection Churn Rates', 'operations/s', 'overview', 'rabbitmq.connection_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_connection_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_connection_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'channel_churn_rates': {
+ 'options': [None, 'Channel Churn Rates', 'operations/s', 'overview', 'rabbitmq.channel_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_channel_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_channel_closed_details_rate', 'closed', 'absolute']
+ ]
+ },
+ 'queue_churn_rates': {
+ 'options': [None, 'Queue Churn Rates', 'operations/s', 'overview', 'rabbitmq.queue_churn_rates', 'line'],
+ 'lines': [
+ ['churn_rates_queue_created_details_rate', 'created', 'absolute'],
+ ['churn_rates_queue_declared_details_rate', 'declared', 'absolute'],
+ ['churn_rates_queue_deleted_details_rate', 'deleted', 'absolute']
+ ]
+ },
'queued_messages': {
'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
'lines': [
@@ -148,6 +197,44 @@ def vhost_chart_template(name):
return order, charts
+def queue_chart_template(queue_id):
+ vhost, name = queue_id
+ order = [
+ 'vhost_{0}_queue_{1}_queued_message'.format(vhost, name),
+ 'vhost_{0}_queue_{1}_messages_stats'.format(vhost, name),
+ ]
+ family = 'vhost {0}'.format(vhost)
+
+ charts = {
+ order[0]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" queued messages'.format(name, vhost), 'messages', family, 'rabbitmq.queue_messages', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_messages'.format(vhost, name), 'messages', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_paged_out'.format(vhost, name), 'paged_out', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_persistent'.format(vhost, name), 'persistent', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_ready'.format(vhost, name), 'ready', 'absolute'],
+ ['vhost_{0}_queue_{1}_messages_unacknowledged'.format(vhost, name), 'unack', 'absolute'],
+ ]
+ },
+ order[1]: {
+ 'options': [
+ None, 'Queue "{0}" in "{1}" messages stats'.format(name, vhost), 'messages/s', family, 'rabbitmq.queue_messages_stats', 'line'],
+ 'lines': [
+ ['vhost_{0}_queue_{1}_message_stats_ack'.format(vhost, name), 'ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_confirm'.format(vhost, name), 'confirm', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_deliver'.format(vhost, name), 'deliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get'.format(vhost, name), 'get', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_get_no_ack'.format(vhost, name), 'get_no_ack', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_publish'.format(vhost, name), 'publish', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_redeliver'.format(vhost, name), 'redeliver', 'incremental'],
+ ['vhost_{0}_queue_{1}_message_stats_return_unroutable'.format(vhost, name), 'return_unroutable', 'incremental'],
+ ]
+ },
+ }
+
+ return order, charts
+
class VhostStatsBuilder:
def __init__(self):
@@ -167,6 +254,21 @@ class VhostStatsBuilder:
stats = fetch_data(raw_data=self.stats, metrics=VHOST_MESSAGE_STATS)
return dict(('vhost_{0}_{1}'.format(name, k), v) for k, v in stats.items())
+class QueueStatsBuilder:
+ def __init__(self):
+ self.stats = None
+
+ def set(self, raw_stats):
+ self.stats = raw_stats
+
+ def id(self):
+ return self.stats['vhost'], self.stats['name']
+
+ def queue_stats(self):
+ vhost, name = self.id()
+ stats = fetch_data(raw_data=self.stats, metrics=QUEUE_STATS)
+ return dict(('vhost_{0}_queue_{1}_{2}'.format(vhost, name, k), v) for k, v in stats.items())
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
@@ -181,6 +283,11 @@ class Service(UrlService):
self.node_name = str()
self.vhost = VhostStatsBuilder()
self.collected_vhosts = set()
+ self.collect_queues_metrics = configuration.get('collect_queues_metrics', False)
+ self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled"))
+ if self.collect_queues_metrics:
+ self.queue = QueueStatsBuilder()
+ self.collected_queues = set()
def _get_data(self):
data = dict()
@@ -201,6 +308,11 @@ class Service(UrlService):
if stats:
data.update(stats)
+ if self.collect_queues_metrics:
+ stats = self.get_queues_stats()
+ if stats:
+ data.update(stats)
+
return data or None
def get_overview_stats(self):
@@ -260,6 +372,31 @@ class Service(UrlService):
self.debug("number of vhosts: {0}, metrics: {1}".format(len(vhosts), len(data)))
return data
+ def get_queues_stats(self):
+ url = '{0}/{1}'.format(self.url, API_QUEUES)
+ self.debug("doing http request to '{0}'".format(url))
+ raw = self._get_raw_data(url)
+ if not raw:
+ return None
+
+ data = dict()
+ queues = loads(raw)
+ charts_initialized = len(self.charts) > 0
+
+ for queue in queues:
+ self.queue.set(queue)
+ if self.queue.id()[0] not in self.collected_vhosts:
+ continue
+
+ if charts_initialized and self.queue.id() not in self.collected_queues:
+ self.collected_queues.add(self.queue.id())
+ self.add_queue_charts(self.queue.id())
+
+ data.update(self.queue.queue_stats())
+
+ self.debug("number of queues: {0}, metrics: {1}".format(len(queues), len(data)))
+ return data
+
def add_vhost_charts(self, vhost_name):
order, charts = vhost_chart_template(vhost_name)
@@ -271,6 +408,17 @@ class Service(UrlService):
for dimension in dimensions:
new_chart.add_dimension(dimension)
+ def add_queue_charts(self, queue_id):
+ order, charts = queue_chart_template(queue_id)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
def fetch_data(raw_data, metrics):
data = dict()
@@ -291,5 +439,5 @@ def handle_disabled_disk_monitoring(node_stats):
# https://github.com/netdata/netdata/issues/7218
# can be "disk_free": "disk_free_monitoring_disabled"
v = node_stats.get('disk_free')
- if v and isinstance(v, str):
+ if v and not isinstance(v, int):
del node_stats['disk_free']
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index ae0dbdb75..47d47a1bf 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -70,6 +70,12 @@
# user: 'username'
# pass: 'password'
#
+# The RabbitMQ plugin can also collect per-queue stats for each vhost, which is
+# disabled by default. Please note that enabling this can induce serious
+# overhead on both netdata and rabbitmq if a lot of queues are configured and used.
+#
+# collect_queues_metrics: 'yes/no'
+#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
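For illustration, a hypothetical job definition with per-queue collection switched on (mind the overhead warning in the comment above):

```yaml
local:
  host: '127.0.0.1'
  port: 15672
  user: 'guest'
  pass: 'guest'
  collect_queues_metrics: 'yes'
```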
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
index e7ddd382c..9fab56c33 100644
--- a/collectors/python.d.plugin/redis/README.md
+++ b/collectors/python.d.plugin/redis/README.md
@@ -1,6 +1,12 @@
-# redis
+<!--
+title: "Redis monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/redis/README.md
+sidebar_label: "Redis"
+-->
-Get INFO data from redis instance.
+# Redis monitoring with Netdata
+
+Monitors database status. It reads the server's response to the `INFO` command.
Following charts are drawn:
@@ -30,7 +36,15 @@ Following charts are drawn:
- connected
-## configuration
+## Configuration
+
+Edit the `python.d/redis.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/redis.conf
+```
```yaml
socket:
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
index 40ccb5274..e09916d86 100644
--- a/collectors/python.d.plugin/redis/redis.chart.py
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -5,7 +5,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import re
-
from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService
@@ -37,7 +36,6 @@ PIKA_ORDER = [
'uptime',
]
-
CHARTS = {
'operations': {
'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
@@ -53,8 +51,9 @@ CHARTS = {
]
},
'memory': {
- 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'area'],
'lines': [
+ ['maxmemory', 'max', 'absolute', 1, 1024],
['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024]
]
@@ -156,6 +155,7 @@ class Service(SocketService):
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
self.request = 'INFO\r\n'.encode()
self.bgsave_time = 0
+ self.keyspace_dbs = set()
def do_auth(self):
resp = self._get_raw_data(request=self.auth_request)
@@ -189,23 +189,38 @@ class Service(SocketService):
:return: dict
"""
data = self.get_raw_and_parse()
-
if not data:
return None
+ self.calc_hit_rate(data)
+ self.calc_redis_keys(data)
+ self.calc_redis_rdb_save_operations(data)
+ return data
+
+ @staticmethod
+ def calc_hit_rate(data):
try:
- data['hit_rate'] = (
- (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
- )
+ hits = int(data['keyspace_hits'])
+ misses = int(data['keyspace_misses'])
+ data['hit_rate'] = hits * 100 / (hits + misses)
except (KeyError, ZeroDivisionError):
data['hit_rate'] = 0
- if data.get('redis_version') and data.get('rdb_bgsave_in_progress'):
- self.get_data_redis_specific(data)
-
- return data
-
- def get_data_redis_specific(self, data):
+ def calc_redis_keys(self, data):
+ if not data.get('redis_version'):
+ return
+ # db0:keys=2,expires=0,avg_ttl=0
+ new_keyspace_dbs = [k for k in data if k.startswith('db') and k not in self.keyspace_dbs]
+ for db in new_keyspace_dbs:
+ self.keyspace_dbs.add(db)
+ self.charts['keys_redis'].add_dimension([db, None, 'absolute'])
+ for db in self.keyspace_dbs:
+ if db not in data:
+ data[db] = 0
+
+ def calc_redis_rdb_save_operations(self, data):
+ if not (data.get('redis_version') and data.get('rdb_bgsave_in_progress')):
+ return
if data['rdb_bgsave_in_progress'] != '0':
self.bgsave_time += self.update_every
else:
@@ -229,11 +244,6 @@ class Service(SocketService):
for n in self.order:
self.definitions.update(copy_chart(n))
- if data.get('redis_version'):
- for k in data:
- if k.startswith('db'):
- self.definitions['keys_redis']['lines'].append([k, None, 'absolute'])
-
return True
def _check_raw_data(self, data):
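The keyspace rework above registers a chart dimension the first time a new `dbN` key appears in the parsed `INFO` output and afterwards zero-fills databases that disappear, so established dimensions do not flap. A self-contained sketch of that bookkeeping (the `add_dimension` callback stands in for the charts framework):

```python
class KeyspaceTracker(object):
    def __init__(self, add_dimension):
        self.seen = set()
        self.add_dimension = add_dimension  # e.g. charts['keys_redis'].add_dimension

    def update(self, data):
        # INFO lines like 'db0:keys=2,expires=0,avg_ttl=0' arrive pre-parsed,
        # keyed as data['db0']; register any database not seen before
        for db in [k for k in data if k.startswith('db') and k not in self.seen]:
            self.seen.add(db)
            self.add_dimension([db, None, 'absolute'])
        for db in self.seen:
            data.setdefault(db, 0)  # keep dropped databases pinned at zero

tracker = KeyspaceTracker(lambda params: None)
tracker.update({'db0': 2, 'redis_version': '6.0'})
```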
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index 277154336..85cebd96a 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -1,6 +1,12 @@
-# rethinkdbs
+<!--
+title: "RethinkDB monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md
+sidebar_label: "RethinkDB"
+-->
-Module monitor rethinkdb health metrics.
+# RethinkDB monitoring with Netdata
+
+Collects database server and cluster statistics.
Following charts are drawn:
@@ -21,7 +27,15 @@ Following charts are drawn:
- documents
-## configuration
+## Configuration
+
+Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rethinkdbs.conf
+```
```yaml
localhost:
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
index 80cc1cf18..e3fbc3632 100644
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -5,6 +5,7 @@
try:
import rethinkdb as rdb
+
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index 9a82f2ff7..d8bd3a914 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1,3 +1,47 @@
-# retroshare
+<!--
+title: "RetroShare monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md
+sidebar_label: "RetroShare"
+-->
+
+# RetroShare monitoring with Netdata
+
+Monitors application bandwidth, peers and DHT metrics.
+
+This module will monitor one or more `RetroShare` applications, depending on your configuration.
+
+## Charts
+
+This module produces the following charts:
+
+- Bandwidth in `kilobits/s`
+- Peers in `peers`
+- DHT in `peers`
+
+
+## Configuration
+
+Edit the `python.d/retroshare.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/retroshare.conf
+```
+
+Here is an example for 2 servers:
+
+```yaml
+localhost:
+ url : 'http://localhost:9090'
+ user : "user"
+ password : "pass"
+
+remote:
+ url : 'http://203.0.113.1:9090'
+ user : "user"
+ password : "pass"
+```
+---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index feb871fbd..3f9593e94 100644
--- a/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -7,7 +7,6 @@ import json
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'bandwidth',
'peers',
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
index 04343dd99..d0ea9a137 100644
--- a/collectors/python.d.plugin/riakkv/README.md
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -1,8 +1,14 @@
-# riakkv
+<!--
+title: "Riak KV monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md
+sidebar_label: "Riak KV"
+-->
-Monitors one or more Riak KV servers.
+# Riak KV monitoring with Netdata
-**Requirements:**
+Collects database stats from the `/stats` endpoint.
+
+## Requirements
- An accessible `/stats` endpoint. See [the Riak KV configuration reference documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
for how to enable this.
@@ -94,7 +100,15 @@ listed
- bad_entry
- extract_fail
-## configuration
+## Configuration
+
+Edit the `python.d/riakkv.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/riakkv.conf
+```
The module needs to be passed the full URL to Riak's stats endpoint.
For example:
@@ -110,3 +124,5 @@ With no explicit configuration given, the module will attempt to connect to
The default update frequency for the plugin is set to 2 seconds as Riak
internally updates the metrics every second. If we were to update the metrics
every second, the resulting graph would contain odd jitter.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Friakkv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/riakkv/riakkv.chart.py b/collectors/python.d.plugin/riakkv/riakkv.chart.py
index f81e177a5..c390c8bc0 100644
--- a/collectors/python.d.plugin/riakkv/riakkv.chart.py
+++ b/collectors/python.d.plugin/riakkv/riakkv.chart.py
@@ -67,14 +67,16 @@ ORDER = [
CHARTS = {
# Throughput metrics
"kv.node_operations": {
- "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput", "line"],
+ "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
+ "line"],
"lines": [
["node_gets_total", "gets", "incremental"],
["node_puts_total", "puts", "incremental"]
]
},
"dt.vnode_updates": {
- "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput", "riak.dt.vnode_updates", "line"],
+ "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
+ "riak.dt.vnode_updates", "line"],
"lines": [
["vnode_counter_update_total", "counters", "incremental"],
["vnode_set_update_total", "sets", "incremental"],
@@ -94,7 +96,8 @@ CHARTS = {
]
},
"consistent.operations": {
- "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations", "line"],
+ "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
+ "line"],
"lines": [
["consistent_gets_total", "gets", "incremental"],
["consistent_puts_total", "puts", "incremental"],
@@ -103,7 +106,8 @@ CHARTS = {
# Latency metrics
"kv.latency.get": {
- "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms", "latency", "riak.kv.latency.get", "line"],
+ "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.get", "line"],
"lines": [
["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
["node_get_fsm_time_median", "median", "absolute", 1, 1000],
@@ -113,7 +117,8 @@ CHARTS = {
]
},
"kv.latency.put": {
- "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms", "latency", "riak.kv.latency.put", "line"],
+ "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.put", "line"],
"lines": [
["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
["node_put_fsm_time_median", "median", "absolute", 1, 1000],
@@ -123,7 +128,8 @@ CHARTS = {
]
},
"dt.latency.counter": {
- "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency", "riak.dt.latency.counter_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
+ "riak.dt.latency.counter_merge", "line"],
"lines": [
["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
["object_counter_merge_time_median", "median", "absolute", 1, 1000],
@@ -133,7 +139,8 @@ CHARTS = {
]
},
"dt.latency.set": {
- "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency", "riak.dt.latency.set_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
+ "riak.dt.latency.set_merge", "line"],
"lines": [
["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
["object_set_merge_time_median", "median", "absolute", 1, 1000],
@@ -143,7 +150,8 @@ CHARTS = {
]
},
"dt.latency.map": {
- "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency", "riak.dt.latency.map_merge", "line"],
+ "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
+ "riak.dt.latency.map_merge", "line"],
"lines": [
["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
["object_map_merge_time_median", "median", "absolute", 1, 1000],
@@ -164,7 +172,8 @@ CHARTS = {
]
},
"search.latency.index": {
- "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index", "line"],
+ "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
+ "line"],
"lines": [
["search_index_latency_median", "median", "absolute", 1, 1000],
["search_index_latency_min", "min", "absolute", 1, 1000],
@@ -205,7 +214,8 @@ CHARTS = {
]
},
"vm.memory.processes": {
- "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes", "line"],
+ "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
+ "line"],
"lines": [
["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
@@ -214,7 +224,8 @@ CHARTS = {
# General Riak Load/Health metrics
"kv.siblings_encountered.get": {
- "options": [None, "Number of siblings encountered during GET operations by this node during the past minute", "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
+ "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
+ "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
"lines": [
["node_get_fsm_siblings_mean", "mean", "absolute"],
["node_get_fsm_siblings_median", "median", "absolute"],
@@ -224,7 +235,8 @@ CHARTS = {
]
},
"kv.objsize.get": {
- "options": [None, "Object size encountered by this node during the past minute", "KB", "load", "riak.kv.objsize.get", "line"],
+ "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
+ "riak.kv.objsize.get", "line"],
"lines": [
["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
@@ -234,7 +246,9 @@ CHARTS = {
]
},
"search.vnodeq_size": {
- "options": [None, "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute", "messages", "load", "riak.search.vnodeq_size", "line"],
+ "options": [None,
+ "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
+ "messages", "load", "riak.search.vnodeq_size", "line"],
"lines": [
["riak_search_vnodeq_mean", "mean", "absolute"],
["riak_search_vnodeq_median", "median", "absolute"],
@@ -244,20 +258,23 @@ CHARTS = {
]
},
"search.index_errors": {
- "options": [None, "Number of document index errors encountered by Search", "errors", "load", "riak.search.index", "line"],
+ "options": [None, "Number of document index errors encountered by Search", "errors", "load",
+ "riak.search.index", "line"],
"lines": [
["search_index_fail_count", "errors", "absolute"]
]
},
"core.pbc": {
- "options": [None, "Protocol buffer connections by status", "connections", "load", "riak.core.protobuf_connections", "line"],
+ "options": [None, "Protocol buffer connections by status", "connections", "load",
+ "riak.core.protobuf_connections", "line"],
"lines": [
["pbc_active", "active", "absolute"],
# ["pbc_connects", "established_pastmin", "absolute"]
]
},
"core.repairs": {
- "options": [None, "Number of repair operations this node has coordinated", "repairs", "load", "riak.core.repairs", "line"],
+ "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
+ "riak.core.repairs", "line"],
"lines": [
["read_repairs", "read", "absolute"]
]
@@ -275,7 +292,8 @@ CHARTS = {
# Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
# its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
# dashboard for some reason.
- "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load", "riak.core.fsm_rejected", "line"],
+ "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
+ "riak.core.fsm_rejected", "line"],
"lines": [
["node_get_fsm_rejected", "get", "absolute"],
["node_put_fsm_rejected", "put", "absolute"]
@@ -284,7 +302,8 @@ CHARTS = {
# General Riak Search Load / Health metrics
"search.errors": {
- "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load", "riak.search.index", "line"],
+ "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
+ "riak.search.index", "line"],
"lines": [
["search_index_bad_entry_count", "bad_entry", "absolute"],
["search_index_extract_fail_count", "extract_fail", "absolute"],
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index ad99deade..ed26d2871 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -1,8 +1,14 @@
-# samba
+<!--
+title: "Samba monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md
+sidebar_label: "Samba"
+-->
-Performance metrics of Samba file sharing.
+# Samba monitoring with Netdata
-**Requirements:**
+Monitors the performance metrics of Samba file sharing.
+
+## Requirements
- `smbstatus` program
- `sudo` program
@@ -15,7 +21,7 @@ It produces the following charts:
1. **Syscall R/Ws** in kilobytes/s
- sendfile
- - recvfle
+ - recvfile
2. **Smb2 R/Ws** in kilobytes/s
@@ -67,14 +73,22 @@ Add to `sudoers`:
netdata ALL=(root) NOPASSWD: /path/to/smbstatus
```
-## configuration
+## Configuration
- **samba** is disabled by default. Should be explicitly enabled in `python.d.conf`.
+**samba** is disabled by default and must be explicitly enabled in `python.d.conf`:
```yaml
samba: yes
```
+Edit the `python.d/samba.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/samba.conf
+```
+
---
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
index ac89c29b0..8eebcd60c 100644
--- a/collectors/python.d.plugin/samba/samba.chart.py
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -17,10 +17,10 @@
# (like find and notify... good examples).
import re
+import os
-from bases.collection import find_binary
from bases.FrameworkServices.ExecutableService import ExecutableService
-
+from bases.collection import find_binary
disabled_by_default = True
@@ -96,6 +96,9 @@ CHARTS = {
}
}
+SUDO = 'sudo'
+SMBSTATUS = 'smbstatus'
+
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
@@ -105,20 +108,26 @@ class Service(ExecutableService):
self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
def check(self):
- sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
-
- if not (sudo_binary and smbstatus_binary):
- self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
+ smbstatus_binary = find_binary(SMBSTATUS)
+ if not smbstatus_binary:
+ self.error("can't locate '{0}' binary".format(SMBSTATUS))
return False
- self.command = [sudo_binary, '-v']
- err = self._get_raw_data(stderr=True)
- if err:
- self.error(''.join(err))
+ if os.getuid() == 0:
+ self.command = ' '.join([smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ sudo_binary = find_binary(SUDO)
+ if not sudo_binary:
+ self.error("can't locate '{0}' binary".format(SUDO))
+ return False
+ command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
+ smbstatus = '{0} -P'.format(smbstatus_binary)
+ allowed = self._get_raw_data(command=command)
+ if not (allowed and allowed[0].strip() == smbstatus):
+ self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
return False
-
self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
-
return ExecutableService.check(self)
def _get_data(self):
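The reworked `check()` above first runs `smbstatus` directly when Netdata is root, and otherwise probes for passwordless sudo rights with `sudo -n -l` before committing to a command line. A minimal standalone sketch of that probe follows; `can_run_smbstatus`, `shutil.which`, and `subprocess.run` are stand-ins of my own for the plugin's `find_binary` and `_get_raw_data` helpers, and real `sudo -l` output may carry extra lines the plugin handles differently.

```python
# Hedged sketch of the permission probe performed by the new check() logic.
import os
import shutil
import subprocess

def can_run_smbstatus():
    smbstatus = shutil.which('smbstatus')
    if not smbstatus:
        return False
    if os.getuid() == 0:
        return True  # root runs 'smbstatus -P' directly, no sudo needed
    sudo = shutil.which('sudo')
    if not sudo:
        return False
    # 'sudo -n -l <cmd>' lists the command iff it may run without a password;
    # '-n' makes sudo fail instead of prompting interactively.
    out = subprocess.run([sudo, '-n', '-l', smbstatus, '-P'],
                         capture_output=True, text=True)
    return out.returncode == 0 and out.stdout.strip() == '{0} -P'.format(smbstatus)

print(can_run_smbstatus())
```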
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index 1c0613c72..5d2934844 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -1,12 +1,24 @@
-# sensors
+<!--
+title: "Linux machine sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md
+sidebar_label: "Linux machine sensors"
+-->
-System sensors information.
+# Linux machine sensors monitoring with Netdata
+
+Reads system sensors information (temperature, voltage, electric current, power, etc.).
Charts are created dynamically.
-## configuration
+## Configuration
+
+Edit the `python.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-For detailed configuration information please read [`sensors.conf`](sensors.conf) file.
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/sensors.conf
+```
### possible issues
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index 6b54ea601..8c0cde6bb 100644
--- a/collectors/python.d.plugin/sensors/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -3,10 +3,8 @@
# Author: Pawel Krupa (paulfantom)
# SPDX-License-Identifier: GPL-3.0-or-later
-from third_party import lm_sensors as sensors
-
from bases.FrameworkServices.SimpleService import SimpleService
-
+from third_party import lm_sensors as sensors
ORDER = [
'temperature',
@@ -162,4 +160,4 @@ class Service(SimpleService):
self.create_definitions()
- return True
+ return bool(self.get_data())
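The one-line change above is behavioral, not cosmetic: `check()` used to succeed unconditionally, so a job with zero detected sensors would start and stay silent forever; now a first collection gates startup. A tiny sketch of the pattern, with an invented class and data:

```python
# Invented minimal class illustrating the check() pattern above.
class SensorsJob:
    def __init__(self, found_sensors):
        self.found_sensors = found_sensors

    def get_data(self):
        # returns a dict of readings; empty when nothing was detected
        return {'t0': 42} if self.found_sensors else {}

    def check(self):
        return bool(self.get_data())  # job starts only if data is collectable

print(SensorsJob(True).check())   # True
print(SensorsJob(False).check())  # False -> the job is disabled
```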
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index 6f4dda50c..a1b41f408 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -1,8 +1,14 @@
-# smartd_log
+<!--
+title: "Storage devices monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md
+sidebar_label: "S.M.A.R.T. attributes"
+-->
-Module monitor `smartd` log files to collect HDD/SSD S.M.A.R.T attributes.
+# Storage devices monitoring with Netdata
-**Requirements:**
+Monitors `smartd` log files to collect HDD/SSD S.M.A.R.T attributes.
+
+## Requirements
- `smartmontools`
@@ -97,7 +103,15 @@ Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontool
`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
-## configuration
+## Configuration
+
+Edit the `python.d/smartd_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/smartd_log.conf
+```
```yaml
local:
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index f121ab2e0..8f10a5351 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -5,13 +5,11 @@
import os
import re
-
from copy import deepcopy
from time import time
-from bases.collection import read_last_line
from bases.FrameworkServices.SimpleService import SimpleService
-
+from bases.collection import read_last_line
INCREMENTAL = 'incremental'
ABSOLUTE = 'absolute'
@@ -59,7 +57,6 @@ ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
ATTR_TEMPERATURE = 'temperature'
-
RE_ATA = re.compile(
'(\d+);' # attribute
'(\d+);' # normalized value
@@ -265,7 +262,7 @@ CHARTS = {
'line'],
'lines': [],
'attrs': [ATTR5],
- 'algo': INCREMENTAL,
+ 'algo': ABSOLUTE,
},
'reserved_block_count': {
'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
@@ -533,7 +530,9 @@ def handle_error(*errors):
return method(*args)
except errors:
return None
+
return on_call
+
return on_method
@@ -653,10 +652,10 @@ class Service(SimpleService):
current_time = time()
for disk in self.disks[:]:
if any(
- [
- not disk.alive,
- not disk.log_file.is_active(current_time, self.age),
- ]
+ [
+ not disk.alive,
+ not disk.log_file.is_active(current_time, self.age),
+ ]
):
self.disks.remove(disk.raw_name)
self.remove_disk_from_charts(disk)
@@ -673,7 +672,7 @@ class Service(SimpleService):
return len(self.disks)
- def create_disk_from_file(self, full_name, current_time):
+ def create_disk_from_file(self, full_name, current_time):
if not full_name.endswith(CSV):
self.debug('skipping {0}: not a csv file'.format(full_name))
return None
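Two changes in the hunks above are worth unpacking. The `INCREMENTAL` to `ABSOLUTE` switch applies to the chart fed by attribute 5 (the reallocated-sector count): that raw value is a lifetime gauge, so charting its per-second derivative was misleading, while plotting the current value is correct. And `RE_ATA` extracts semicolon-separated fields from smartd's `.csv` entries; the sketch below uses the pattern trimmed to the three fields visible in the hunk, against an invented sample line.

```python
# Hedged sketch of RE_ATA field extraction; the csv line below is invented.
import re

RE_ATA = re.compile(
    r'(\d+);'  # attribute id
    r'(\d+);'  # normalized value
    r'(\d+)'   # raw value
)

line = '2021-01-01 00:00:00;\t5;100;0;\t194;64;36;'
print(RE_ATA.findall(line))  # [('5', '100', '0'), ('194', '64', '36')]
# attribute 5 is a lifetime counter, hence the switch to the 'absolute' algo
```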
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index 8b74913de..9b297f639 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -1,6 +1,12 @@
-# spigotmc
+<!--
+title: "SpigotMC monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md
+sidebar_label: "SpigotMC"
+-->
-This module does some really basic monitoring for Spigot Minecraft servers.
+# SpigotMC monitoring with Netdata
+
+Performs basic monitoring for Spigot Minecraft servers.
It provides two charts, one tracking server-side ticks-per-second in
1, 5 and 15 minute averages, and one tracking the number of currently
@@ -9,7 +15,15 @@ active users.
This is not compatible with Spigot plugins which change the format of
the data returned by the `tps` or `list` console commands.
-## configuration
+## Configuration
+
+Edit the `python.d/spigotmc.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/spigotmc.conf
+```
```yaml
host: localhost
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index 79d17058c..f334113e4 100644
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -3,12 +3,11 @@
# Author: Austin S. Hemmelgarn (Ferroin)
# SPDX-License-Identifier: GPL-3.0-or-later
-import socket
import platform
import re
+import socket
from bases.FrameworkServices.SimpleService import SimpleService
-
from third_party import mcrcon
# Update only every 5 seconds because collection takes in excess of
@@ -43,9 +42,8 @@ CHARTS = {
}
}
-
_TPS_REGEX = re.compile(
- r'^.*: .*?' # Message lead-in
+ r'^.*: .*?' # Message lead-in
r'(\d{1,2}.\d+), .*?' # 1-minute TPS value
r'(\d{1,2}.\d+), .*?' # 5-minute TPS value
r'(\d{1,2}\.\d+).*$', # 15-minute TPS value
@@ -107,10 +105,10 @@ class Service(SimpleService):
def is_alive(self):
if any(
- [
- not self.alive,
- self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
- ]
+ [
+ not self.alive,
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
+ ]
):
return self.reconnect()
return True
@@ -131,7 +129,8 @@ class Service(SimpleService):
else:
self.error('Unable to process TPS values.')
if not raw:
- self.error("'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
+ self.error(
+ "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
except mcrcon.MCRconException:
self.error('Unable to fetch TPS values.')
except socket.error:
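For context on the `_TPS_REGEX` reindentation above: the pattern pulls the 1-, 5-, and 15-minute tick-per-second averages out of the `tps` console reply. A hedged sketch follows; the sample string is invented (real Spigot output also carries color codes that the lazy `.*?` segments skip over).

```python
# Hedged sketch of _TPS_REGEX against an invented 'tps' reply.
import re

_TPS_REGEX = re.compile(
    r'^.*: .*?'            # message lead-in
    r'(\d{1,2}.\d+), .*?'  # 1-minute TPS value
    r'(\d{1,2}.\d+), .*?'  # 5-minute TPS value
    r'(\d{1,2}\.\d+).*$',  # 15-minute TPS value
)

sample = 'TPS from last 1m, 5m, 15m: 19.98, 20.0, 20.0'
match = _TPS_REGEX.match(sample)
if match:
    print(match.groups())  # ('19.98', '20.0', '20.0')
```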
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
index 37b4dd7cb..f38e8bf05 100644
--- a/collectors/python.d.plugin/springboot/README.md
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -1,6 +1,12 @@
-# springboot
+<!--
+title: "Java Spring Boot 2 application monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/springboot/README.md
+sidebar_label: "Java Spring Boot 2 applications"
+-->
-This module will monitor one or more Java Spring-boot applications depending on configuration.
+# Java Spring Boot 2 application monitoring with Netdata
+
+Monitors one or more Java Spring-boot applications depending on configuration.
Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in Spring Boot library.
## Configuration
@@ -87,14 +93,20 @@ Please refer [Spring Boot Actuator: Production-ready Features](https://docs.spri
- MarkSweep
- ...
-4. **Heap Mmeory Usage** in KB
+4. **Heap Memory Usage** in KB
- used
- committed
## Usage
-The springboot module is enabled by default. It looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot application by default. You can change it by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`).
+Edit the `python.d/springboot.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/springboot.conf
+```
This module defines some common charts, and you can add custom charts by changing the configuration.
@@ -126,6 +138,8 @@ You can disable the default charts by set `defaults.<chart-id>: false`.
The dimension names of extra charts should replace `.` with `_`.
-Please check [springboot.conf](springboot.conf) for more examples.
+Please check
+[springboot.conf](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/springboot/springboot.conf)
+for more examples.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
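On the `.` to `_` rule just above: assuming the actuator endpoint returns dotted metric keys (as Spring Boot 1.x `/metrics` does), the collector needs identifiers that are safe as chart dimension ids. A hedged sketch of that flattening, with an invented payload:

```python
# Hedged sketch: dotted metric keys flattened into dimension-safe ids.
import json

raw = '{"mempool.eden.used": 12345, "mempool.eden.committed": 20480}'
data = {k.replace('.', '_'): v for k, v in json.loads(raw).items()}
print(data)  # {'mempool_eden_used': 12345, 'mempool_eden_committed': 20480}
```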
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index eec870ebf..dbe11d6b8 100644
--- a/collectors/python.d.plugin/springboot/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -4,8 +4,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import json
-from bases.FrameworkServices.UrlService import UrlService
+from bases.FrameworkServices.UrlService import UrlService
DEFAULT_ORDER = [
'response_code',
@@ -92,7 +92,7 @@ class Service(UrlService):
try:
data = json.loads(raw_data)
except ValueError:
- self.debug('%s is not a vaild JSON page' % self.url)
+ self.debug('%s is not a valid JSON page' % self.url)
return None
result = {
@@ -146,7 +146,7 @@ class Service(UrlService):
}
for line in lines:
- dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
name = line.get('name', dimension)
algorithm = line.get('algorithm', 'absolute')
multiplier = line.get('multiplier', 1)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 13a398955..0cb369cd8 100644
--- a/collectors/python.d.plugin/springboot/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -75,7 +75,7 @@
#
# Configuration example
# ---------------------
-# expample:
+# example:
# name: 'example'
# url: 'http://localhost:8080/metrics'
# defaults:
@@ -96,17 +96,17 @@
# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
# lines:
# - { dimension: 'mempool_eden_used', name: 'used'}
-# - { dimension: 'mempool_eden_committed', name: 'commited'}
+# - { dimension: 'mempool_eden_committed', name: 'committed'}
# - id: 'heap_survivor'
# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
# lines:
# - { dimension: 'mempool_survivor_used', name: 'used'}
-# - { dimension: 'mempool_survivor_committed', name: 'commited'}
+# - { dimension: 'mempool_survivor_committed', name: 'committed'}
# - id: 'heap_tenured'
# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
# lines:
# - { dimension: 'mempool_tenured_used', name: 'used'}
-# - { dimension: 'mempool_tenured_committed', name: 'commited'}
+# - { dimension: 'mempool_tenured_committed', name: 'committed'}
local:
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index e1e3d0741..e3ed4e0df 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -1,6 +1,12 @@
-# squid
+<!--
+title: "Squid monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md
+sidebar_label: "Squid"
+-->
-This module will monitor one or more squid instances depending on configuration.
+# Squid monitoring with Netdata
+
+Monitors one or more squid instances depending on configuration.
It produces the following charts:
@@ -26,7 +32,15 @@ It produces following charts:
- requests
- errors
-## configuration
+## Configuration
+
+Edit the `python.d/squid.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/squid.conf
+```
```yaml
priority : 50000
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index c00556b56..bcae2d892 100644
--- a/collectors/python.d.plugin/squid/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -5,7 +5,6 @@
from bases.FrameworkServices.SocketService import SocketService
-
ORDER = [
'clients_net',
'clients_requests',
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index 4d492c2d0..f9f2ffe31 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -1,6 +1,12 @@
-# tomcat
+<!--
+title: "Apache Tomcat monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md
+sidebar_label: "Tomcat"
+-->
-Present tomcat containers memory utilization.
+# Apache Tomcat monitoring with Netdata
+
+Presents memory utilization of tomcat containers.
Charts:
@@ -21,7 +27,15 @@ Charts:
- jvm
-## configuration
+## Configuration
+
+Edit the `python.d/tomcat.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tomcat.conf
+```
```yaml
localhost:
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index ab3003304..90315f8c7 100644
--- a/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -4,8 +4,8 @@
# Author: Wei He (Wing924)
# SPDX-License-Identifier: GPL-3.0-or-later
-import xml.etree.ElementTree as ET
import re
+import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index 40905a958..192a86a37 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -1,8 +1,14 @@
-# tor
+<!--
+title: "Tor monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md
+sidebar_label: "Tor"
+-->
-Module connects to tor control port to collect traffic statistics.
+# Tor monitoring with Netdata
-**Requirements:**
+Connects to the Tor control port to collect traffic statistics.
+
+## Requirements
- `tor` program
- `stem` python package
@@ -14,9 +20,17 @@ It produces only one chart:
- read
- write
-## configuration
+## Configuration
+
+Edit the `python.d/tor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/tor.conf
+```
-Needs only `control_port`
+Needs only `control_port`.
Here is an example for a local server:
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
index c6378ba5c..8dc021a63 100644
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -11,11 +11,11 @@ try:
import stem
import stem.connection
import stem.control
+
STEM_AVAILABLE = True
except ImportError:
STEM_AVAILABLE = False
-
DEF_PORT = 'default'
ORDER = [
@@ -35,6 +35,7 @@ CHARTS = {
class Service(SimpleService):
"""Provide netdata service for Tor"""
+
def __init__(self, configuration=None, name=None):
super(Service, self).__init__(configuration=configuration, name=name)
self.order = ORDER
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 9ced6060f..2a1dd77aa 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -1,6 +1,12 @@
-# traefik
+<!--
+title: "Traefik monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md
+sidebar_label: "Traefik"
+-->
-Module uses the `health` API to provide statistics.
+# Traefik monitoring with Netdata
+
+Uses the `health` API to provide statistics.
It produces:
@@ -39,7 +45,15 @@ It produces:
- Traefik server uptime
-## configuration
+## Configuration
+
+Edit the `python.d/traefik.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/traefik.conf
+```
Needs only the `url` of the server's `health` endpoint.
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index 570339d0a..5a498467f 100644
--- a/collectors/python.d.plugin/traefik/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -4,12 +4,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from collections import defaultdict
-
from json import loads
from bases.FrameworkServices.UrlService import UrlService
-
ORDER = [
'response_statuses',
'response_codes',
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
deleted file mode 100644
index 59c306aed..000000000
--- a/collectors/python.d.plugin/unbound/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += unbound/unbound.chart.py
-dist_pythonconfig_DATA += unbound/unbound.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
-
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
deleted file mode 100644
index 4a3076100..000000000
--- a/collectors/python.d.plugin/unbound/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# unbound
-
-## Deprecation Notes
-
-This module is deprecated. Please use [new version](https://github.com/netdata/go.d.plugin/tree/master/modules/unbound) instead.
-
-___
-
-Monitoring uses the remote control interface to fetch statistics.
-
-Provides the following charts:
-
-1. **Queries Processed**
-
- - Ratelimited
- - Cache Misses
- - Cache Hits
- - Expired
- - Prefetched
- - Recursive
-
-2. **Request List**
-
- - Average Size
- - Max Size
- - Overwritten Requests
- - Overruns
- - Current Size
- - User Requests
-
-3. **Recursion Timings**
-
-- Average recursion processing time
-- Median recursion processing time
-
-If extended stats are enabled, also provides:
-
-4. **Cache Sizes**
-
- - Message Cache
- - RRset Cache
- - Infra Cache
- - DNSSEC Key Cache
- - DNSCrypt Shared Secret Cache
- - DNSCrypt Nonce Cache
-
-## Configuration
-
-Unbound must be manually configured to enable the remote-control protocol.
-Check the Unbound documentation for info on how to do this. Additionally,
-if you want to take advantage of the autodetection this plugin offers,
-you will need to make sure your `unbound.conf` file only uses spaces for
-indentation (the default config shipped by most distributions uses tabs
-instead of spaces).
-
-Once you have the Unbound control protocol enabled, you need to make sure
-that either the certificate and key are readable by Netdata (if you're
-using the regular control interface), or that the socket is accessible
-to Netdata (if you're using a UNIX socket for the contorl interface).
-
-By default, for the local system, everything can be auto-detected
-assuming Unbound is configured correctly and has been told to listen
-on the loopback interface or a UNIX socket. This is done by looking
-up info in the Unbound config file specified by the `ubconf` key.
-
-To enable extended stats for a given job, add `extended: yes` to the
-definition.
-
-You can also enable per-thread charts for a given job by adding
-`per_thread: yes` to the definition. Note that the numbe rof threads
-is only checked on startup.
-
-A basic local configuration with extended statistics and per-thread
-charts looks like this:
-
-```yaml
-local:
- ubconf: /etc/unbound/unbound.conf
- extended: yes
- per_thread: yes
-```
-
-While it's a bit more complicated to set up correctly, it is recommended
-that you use a UNIX socket as it provides far better performance.
-
-### Troubleshooting
-
-If you've configured the module and can't get it to work, make sure and
-check all of the following:
-
-- If you're using autodetection, double check that your `unbound.conf`
- file is actually using spaces instead of tabs, and that appropriate
- indentation is present. Most Linux distributions ship a default config
- for Unbound that uses tabs, and the plugin can't read such a config file
- correctly. Also, make sure this file is actually readable by Netdata.
-- Ensure that the control protocol is actually configured correctly.
- You can check this quickly by running `unbound-control stats_noreset`
- as root, which should print out a bunch of info about the internal
- statistics of the server. If this returns an error, you don't have
- the control protocol set up correctly.
-- If using the regular control interface, make sure that the certificate
- and key file you have configured in `unbound.conf` are readable by
- Netdata. In general, it's preferred to use ACL's on the files to
- provide the required permissions.
-- If using a UNIX socket, make sure that the socket is both readable
- _and_ writable by Netdata. Just like with the regular control
- interface, it's preferred to use ACL's to provide these permissions.
-- Make sure that SELinux, Apparmor, or any other mandatory access control
- system isn't interfering with the access requirements mentioned above.
- In some cases, you may have to add a local rule to allow this access.
-
----
-
-[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
deleted file mode 100644
index 590de4c98..000000000
--- a/collectors/python.d.plugin/unbound/unbound.chart.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: unbound netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import sys
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-from bases.loaders import load_config
-
-PRECISION = 1000
-
-ORDER = [
- 'queries',
- 'recursion',
- 'reqlist',
-]
-
-CHARTS = {
- 'queries': {
- 'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
- 'lines': [
- ['ratelimit', 'ratelimited', 'absolute', 1, 1],
- ['cachemiss', 'cache_miss', 'absolute', 1, 1],
- ['cachehit', 'cache_hit', 'absolute', 1, 1],
- ['expired', 'expired', 'absolute', 1, 1],
- ['prefetch', 'prefetched', 'absolute', 1, 1],
- ['recursive', 'recursive', 'absolute', 1, 1]
- ]
- },
- 'recursion': {
- 'options': [None, 'Recursion Timings', 'milliseconds', 'Unbound', 'unbound.recursion', 'line'],
- 'lines': [
- ['recursive_avg', 'average', 'absolute', 1, 1],
- ['recursive_med', 'median', 'absolute', 1, 1]
- ]
- },
- 'reqlist': {
- 'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
- 'lines': [
- ['reqlist_avg', 'average_size', 'absolute', 1, 1],
- ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
- ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
- ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
- ['reqlist_current', 'current_size', 'absolute', 1, 1],
- ['reqlist_user', 'user_requests', 'absolute', 1, 1]
- ]
- }
-}
-
-# These get added too if we are told to use extended stats.
-EXTENDED_ORDER = ['cache']
-
-EXTENDED_CHARTS = {
- 'cache': {
- 'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
- 'lines': [
- ['cache_message', 'message_cache', 'absolute', 1, 1],
- ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
- ['cache_infra', 'infra_cache', 'absolute', 1, 1],
- ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
- ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
- ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
- ]
- }
-}
-
-# This is used as a templates for the per-thread charts.
-PER_THREAD_CHARTS = {
- '_queries': {
- 'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
- 'unbound.threads.queries', 'line'],
- 'lines': [
- ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
- ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
- ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
- ['{shortname}_expired', 'expired', 'absolute', 1, 1],
- ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
- ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
- ]
- },
- '_recursion': {
- 'options': [None, '{longname} Recursion Timings', 'milliseconds', 'Recursive Timings',
- 'unbound.threads.recursion', 'line'],
- 'lines': [
- ['{shortname}_recursive_avg', 'average', 'absolute', 1, 1],
- ['{shortname}_recursive_med', 'median', 'absolute', 1, 1]
- ]
- },
- '_reqlist': {
- 'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
- 'lines': [
- ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
- ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
- ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
- ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
- ]
- }
-}
-
-# This maps the Unbound stat names to our names and precision requiremnets.
-STAT_MAP = {
- 'total.num.queries_ip_ratelimited': ('ratelimit', 1),
- 'total.num.cachehits': ('cachehit', 1),
- 'total.num.cachemiss': ('cachemiss', 1),
- 'total.num.zero_ttl': ('expired', 1),
- 'total.num.prefetch': ('prefetch', 1),
- 'total.num.recursivereplies': ('recursive', 1),
- 'total.requestlist.avg': ('reqlist_avg', 1),
- 'total.requestlist.max': ('reqlist_max', 1),
- 'total.requestlist.overwritten': ('reqlist_overwritten', 1),
- 'total.requestlist.exceeded': ('reqlist_exceeded', 1),
- 'total.requestlist.current.all': ('reqlist_current', 1),
- 'total.requestlist.current.user': ('reqlist_user', 1),
- # Unbound reports recursion timings as fractional seconds, but we want to show them as milliseconds.
- 'total.recursion.time.avg': ('recursive_avg', PRECISION),
- 'total.recursion.time.median': ('recursive_med', PRECISION),
- 'msg.cache.count': ('cache_message', 1),
- 'rrset.cache.count': ('cache_rrset', 1),
- 'infra.cache.count': ('cache_infra', 1),
- 'key.cache.count': ('cache_key', 1),
- 'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
- 'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
-}
-
-# Same as above, but for per-thread stats.
-PER_THREAD_STAT_MAP = {
- '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
- '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
- '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
- '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
- '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
- '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
- '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
- '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
- '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
- '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
- '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
- '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
- # Unbound reports recursion timings as fractional seconds, but we want to show them as milliseconds.
- '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
- '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
-}
-
-
-def is_readable(name):
- return os.access(name, os.R_OK)
-
-
-# Used to actually generate per-thread charts.
-def _get_perthread_info(thread):
- sname = 'thread{0}'.format(thread)
- lname = 'Thread {0}'.format(thread)
- charts = dict()
- order = []
- statmap = dict()
-
- for item in PER_THREAD_CHARTS:
- cname = '{0}{1}'.format(sname, item)
- chart = deepcopy(PER_THREAD_CHARTS[item])
- chart['options'][1] = chart['options'][1].format(longname=lname)
-
- for index, line in enumerate(chart['lines']):
- chart['lines'][index][0] = line[0].format(shortname=sname)
-
- order.append(cname)
- charts[cname] = chart
-
- for key, value in PER_THREAD_STAT_MAP.items():
- statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
-
- return charts, order, statmap
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- # The unbound control protocol is always TLS encapsulated
- # unless it's used over a UNIX socket, so enable TLS _before_
- # doing the normal SocketService initialization.
- configuration['tls'] = True
- self.port = 8935
- SocketService.__init__(self, configuration, name)
- self.ext = self.configuration.get('extended', None)
- self.ubconf = self.configuration.get('ubconf', None)
- self.perthread = self.configuration.get('per_thread', False)
- self.threads = None
- self.order = deepcopy(ORDER)
- self.definitions = deepcopy(CHARTS)
- self.request = 'UBCT1 stats\n'
- self.statmap = deepcopy(STAT_MAP)
- self._parse_config()
- self._auto_config()
- self.debug('Extended stats: {0}'.format(self.ext))
- self.debug('Per-thread stats: {0}'.format(self.perthread))
- if self.ext:
- self.order = self.order + EXTENDED_ORDER
- self.definitions.update(EXTENDED_CHARTS)
- if self.unix_socket:
- self.debug('Using unix socket: {0}'.format(self.unix_socket))
- else:
- self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
- self.debug('Using key: {0}'.format(self.key))
- self.debug('Using certificate: {0}'.format(self.cert))
-
- def _auto_config(self):
- self.load_unbound_config()
-
- if not self.key:
- self.key = '/etc/unbound/unbound_control.key'
- if not self.cert:
- self.cert = '/etc/unbound/unbound_control.pem'
- if not self.port:
- self.port = 8953
-
- def load_unbound_config(self):
- if not (self.ubconf and is_readable(self.ubconf)):
- self.debug('Unbound configuration not found.')
- return
-
- self.debug('Loading Unbound config: {0}'.format(self.ubconf))
-
- try:
- conf = load_config(self.ubconf)
- except Exception as error:
- self.error("error on loading '{0}' : {1}".format(self.ubconf, error))
- return
-
- srv = conf.get('server')
- if self.ext is None:
- if srv and 'extended-statistics' in srv:
- self.ext = srv['extended-statistics']
-
- rc = conf.get('remote-control')
- if not (rc and isinstance(rc, dict)):
- return
-
- if rc.get('control-use-cert', False):
- self.key = self.key or rc.get('control-key-file')
- self.cert = self.cert or rc.get('control-cert-file')
- self.port = self.port or rc.get('control-port')
- else:
- ci = rc.get('control-interface', str())
- is_socket = '/' in ci
- if is_socket:
- self.unix_socket = ci
-
- def _generate_perthread_charts(self):
- tmporder = list()
- for thread in range(0, self.threads):
- charts, order, statmap = _get_perthread_info(thread)
- tmporder.extend(order)
- self.definitions.update(charts)
- self.statmap.update(statmap)
- self.order.extend(sorted(tmporder))
-
- def check(self):
- if not is_readable(self.key):
- self.error("ssl key '{0}' is not readable".format(self.key))
- return False
-
- if not is_readable(self.cert):
- self.error("ssl certificate '{0}' is not readable".format(self.certificate))
- return False
-
- # Check if authentication is working.
- self._connect()
- result = bool(self._sock)
- self._disconnect()
- # If auth works, and we need per-thread charts, query the server
- # to see how many threads it's using. This somewhat abuses the
- # SocketService API to get the data we need.
- if result and self.perthread:
- tmp = self.request
- if sys.version_info[0] < 3:
- self.request = 'UBCT1 status\n'
- else:
- self.request = b'UBCT1 status\n'
- raw = self._get_raw_data()
- if raw is None:
- result = False
- self.warning('Received no data from socket.')
- else:
- for line in raw.splitlines():
- if line.startswith('threads'):
- self.threads = int(line.split()[1])
- self._generate_perthread_charts()
- break
- if self.threads is None:
- self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
- self.perthread = False
- self.request = tmp
- return result
-
- def _get_data(self):
- raw = self._get_raw_data()
- data = dict()
- tmp = dict()
- if raw is not None:
- for line in raw.splitlines():
- stat = line.split('=')
- tmp[stat[0]] = stat[1]
- for item in self.statmap:
- if item in tmp:
- data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
- else:
- self.warning('Received no data from socket.')
- return data
-
- @staticmethod
- def _check_raw_data(data):
- # The server will close the connection when it's done sending
- # data, so just keep looping until that happens.
- return False
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index a8111965d..f564821a1 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -1,10 +1,13 @@
-# uwsgi
+<!--
+title: "uWSGI monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md
+sidebar_label: "uWSGI"
+-->
-Module monitor uwsgi performance metrics.
+# uWSGI monitoring with Netdata
-<https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html>
+Monitors performance metrics exposed by the uWSGI [`Stats Server`](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html).
-lines are creates dynamically based on how many workers are there
The following charts are drawn:
@@ -23,7 +26,15 @@ Following charts are drawn:
4. **Harakiris**
5. **Respawns**
-## configuration
+## Configuration
+
+Edit the `python.d/uwsgi.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/uwsgi.conf
+```
```yaml
socket:
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
index 511b770cf..e4d900005 100644
--- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -5,8 +5,8 @@
import json
from copy import deepcopy
-from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.SocketService import SocketService
ORDER = [
'requests',
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 4de883d31..cb29738f5 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -1,80 +1,56 @@
-# varnish
+<!--
+title: "Varnish Cache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md
+sidebar_label: "Varnish Cache"
+-->
-Module uses the `varnishstat` command to provide varnish cache statistics.
+# Varnish Cache monitoring with Netdata
-It produces:
+Provides HTTP accelerator global, backend (VBE), and storage (SMF, SMA, MSE) statistics using the `varnishstat` tool.
-1. **Connections Statistics** in connections/s
+Note that both Varnish-Cache (free and open source) and Varnish-Plus (commercial/enterprise version) are supported.
- - accepted
- - dropped
+## Requirements
-2. **Client Requests** in requests/s
+- `netdata` user must be a member of the `varnish` group
- - received
+## Charts
-3. **All History Hit Rate Ratio** in percent
+This module produces the following charts:
- - hit
- - miss
- - hitpass
+- Connections Statistics in `connections/s`
+- Client Requests in `requests/s`
+- All History Hit Rate Ratio in `percent`
+- Current Poll Hit Rate Ratio in `percent`
+- Expired Objects in `expired/s`
+- Least Recently Used Nuked Objects in `nuked/s`
+- Number Of Threads In All Pools in `pools`
+- Threads Statistics in `threads/s`
+- Current Queue Length in `requests`
+- Backend Connections Statistics in `connections/s`
+- Requests To The Backend in `requests/s`
+- ESI Statistics in `problems/s`
+- Memory Usage in `MiB`
+- Uptime in `seconds`
-4. **Current Poll Hit Rate Ratio** in percent
+For every backend (VBE):
- - hit
- - miss
- - hitpass
+- Backend Response Statistics in `kilobits/s`
-5. **Expired Objects** in expired/s
+For every storage (SMF, SMA, or MSE):
- - objects
+- Storage Usage in `KiB`
+- Storage Allocated Objects
-6. **Least Recently Used Nuked Objects** in nuked/s
+## Configuration
- - objects
+Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-7. **Number Of Threads In All Pools** in threads
-
- - threads
-
-8. **Threads Statistics** in threads/s
-
- - created
- - failed
- - limited
-
-9. **Current Queue Length** in requests
-
- - in queue
-
-10. **Backend Connections Statistics** in connections/s
-
- - successful
- - unhealthy
- - reused
- - closed
- - resycled
- - failed
-
-11. **Requests To The Backend** in requests/s
-
- - received
-
-12. **ESI Statistics** in problems/s
-
- - errors
- - warnings
-
-13. **Memory Usage** in MB
-
- - free
- - allocated
-
-14. **Uptime** in seconds
-
- - uptime
-
-## configuration
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/varnish.conf
+```
Only one parameter is supported:
@@ -82,7 +58,7 @@ Only one parameter is supported:
instance_name: 'name'
```
-The name of the varnishd instance to get logs from. If not specified, the host name is used.
+The name of the `varnishd` instance to get logs from. If not specified, the host name is used.
---
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index 58745e24d..534d70926 100644
--- a/collectors/python.d.plugin/varnish/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -103,7 +103,7 @@ CHARTS = {
['backend_unhealthy', 'unhealthy', 'incremental'],
['backend_reuse', 'reused', 'incremental'],
['backend_toolate', 'closed', 'incremental'],
- ['backend_recycle', 'resycled', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
['backend_fail', 'failed', 'incremental']
]
},
@@ -135,9 +135,54 @@ CHARTS = {
}
}
+
+def backend_charts_template(name):
+ order = [
+ '{0}_response_statistics'.format(name),
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
+ ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def storage_charts_template(name):
+ order = [
+ 'storage_{0}_usage'.format(name),
+ 'storage_{0}_alloc_objs'.format(name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
+ 'lines': [
+ ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
+ ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
+ 'lines': [
+ ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
+ ]
+ }
+ }
+
+ return order, charts
+
+
VARNISHSTAT = 'varnishstat'
-re_version = re.compile(r'varnish-(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
+re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
class VarnishVersion:
@@ -188,6 +233,8 @@ class Service(ExecutableService):
self.instance_name = configuration.get('instance_name')
self.parser = Parser()
self.command = None
+ self.collected_vbe = set()
+ self.collected_storages = set()
def create_command(self):
varnishstat = find_binary(VARNISHSTAT)
@@ -206,10 +253,7 @@ class Service(ExecutableService):
ver = parse_varnish_version(reply)
if not ver:
self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
- ' '.join(command),
- re_version.pattern,
- reply,
- ))
+ ' '.join(command), re_version.pattern, reply))
return False
if self.instance_name:
@@ -241,9 +285,6 @@ class Service(ExecutableService):
self.error('cant parse the output...')
return False
- if self.parser.re_backend:
- backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
- self.create_backends_charts(backends)
return True
def get_data(self):
@@ -260,11 +301,11 @@ class Service(ExecutableService):
if not server_stats:
return None
- if self.parser.re_backend:
- backend_stats = self.parser.backend_stats(raw)
- data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
+ stats = dict((param, value) for _, param, value in server_stats)
+ data.update(stats)
- data.update(dict((param, value) for _, param, value in server_stats))
+ self.get_vbe_backends(data, raw)
+ self.get_storages(server_stats)
# varnish 5 uses default.g_bytes and default.g_space
data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
@@ -272,27 +313,63 @@ class Service(ExecutableService):
return data
- def create_backends_charts(self, backends):
- for backend in backends:
- chart_name = ''.join([backend, '_response_statistics'])
- title = 'Backend "{0}"'.format(backend.capitalize())
- hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
- body_bytes = ''.join([backend, '_beresp_bodybytes'])
-
- chart = {
- chart_name:
- {
- 'options': [None, title, 'kilobits/s', 'backend response statistics',
- 'varnish.backend', 'area'],
- 'lines': [
- [hdr_bytes, 'header', 'incremental', 8, 1000],
- [body_bytes, 'body', 'incremental', -8, 1000]
- ]
- }
- }
-
- self.order.insert(0, chart_name)
- self.definitions.update(chart)
+ def get_vbe_backends(self, data, raw):
+ if not self.parser.re_backend:
+ return
+ stats = self.parser.backend_stats(raw)
+ if not stats:
+ return
+
+ for (name, param, value) in stats:
+ data['_'.join([name, param])] = value
+ if name in self.collected_vbe:
+ continue
+ self.collected_vbe.add(name)
+ self.add_backend_charts(name)
+
+ def get_storages(self, server_stats):
+ # Storage types:
+ # - SMF: File Storage
+ # - SMA: Malloc Storage
+ # - MSE: Massive Storage Engine (Varnish-Plus only)
+ #
+ # Stats example:
+ # [('SMF.', 'ssdStorage.c_req', '47686'),
+ # ('SMF.', 'ssdStorage.c_fail', '0'),
+ # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
+ # ('SMF.', 'ssdStorage.c_freed', '140980224'),
+ # ('SMF.', 'ssdStorage.g_alloc', '39753'),
+ # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
+ # ('SMF.', 'ssdStorage.g_space', '53159968768'),
+ # ('SMF.', 'ssdStorage.g_smf', '40130'),
+ # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
+ # ('SMF.', 'ssdStorage.g_smf_large', '66')]
+ storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
+ if not storages:
+ return
+ for storage in storages:
+ storage = storage.split('.')[0]
+ if storage in self.collected_storages:
+ continue
+ self.collected_storages.add(storage)
+ self.add_storage_charts(storage)
+
+ def add_backend_charts(self, backend_name):
+ self.add_charts(backend_name, backend_charts_template)
+
+ def add_storage_charts(self, storage_name):
+ self.add_charts(storage_name, storage_charts_template)
+
+ def add_charts(self, name, charts_template):
+ order, charts = charts_template(name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
def parse_varnish_version(lines):
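The restructuring above replaces check-time backend chart creation with lazy registration during collection, which matters because backends and storages can appear in `varnishstat` output after startup. A minimal sketch of the pattern (not the plugin's full code; `maybe_add_backend` and the `print` stand in for `get_vbe_backends()` and `self.charts.add_chart`):

```python
# Hedged sketch of template-driven, first-sighting chart registration.
def backend_charts_template(name):
    order = ['{0}_response_statistics'.format(name)]
    charts = {
        order[0]: {
            'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s',
                        'backend response statistics', 'varnish.backend', 'area'],
            'lines': [
                ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
                ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000],
            ],
        },
    }
    return order, charts


collected_vbe = set()

def maybe_add_backend(name):
    # skip names already charted; charts are created only on first sighting
    if name in collected_vbe:
        return
    collected_vbe.add(name)
    order, charts = backend_charts_template(name)
    print('registering:', order)  # the module calls self.charts.add_chart here

maybe_add_backend('default')
maybe_add_backend('default')  # second sighting: no duplicate registration
```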
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index 74edcc0a8..31facef77 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -1,14 +1,27 @@
-# w1sensor
+<!--
+title: "1-Wire Sensors monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md
+sidebar_label: "1-Wire sensors"
+-->
+
+# 1-Wire Sensors monitoring with Netdata
+
+Monitors sensor temperature.
-Data from 1-Wire sensors.
On Linux these are supported by the wire, w1_gpio, and w1_therm modules.
Currently temperature sensors are supported and automatically detected.
Charts are created dynamically based on the number of detected sensors.
-## configuration
+## Configuration
+
+Edit the `python.d/w1sensor.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-For detailed configuration information please read [`w1sensor.conf`](w1sensor.conf) file.
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/w1sensor.conf
+```
---
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index e50312fc5..c4f847bf0 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -5,6 +5,7 @@
import os
import re
+
from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
@@ -40,6 +41,7 @@ THERM_FAMILY = {
class Service(SimpleService):
"""Provide netdata service for 1-Wire sensors"""
+
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
index 33dfd696a..2cf60ed9e 100644
--- a/collectors/python.d.plugin/web_log/README.md
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -1,4 +1,12 @@
-# web_log
+<!--
+title: "Web server log (Apache, NGINX, Squid) monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/web_log/README.md
+sidebar_label: "Web server logs (Apache, NGINX, Squid)"
+-->
+
+# Web server log (Apache, NGINX, Squid) monitoring with Netdata
+
+Tails the access log file and collects web server/caching proxy metrics.
## Motivation
@@ -27,7 +35,15 @@ If Netdata is installed on a system running a web server, it will detect it and
## Configuration
-[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes, pre-configured, for finding web server log files on popular distributions. Its configuration is at [`/etc/netdata/python.d/web_log.conf`](web_log.conf), like this:
+Edit the `python.d/web_log.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/web_log.conf
+```
+
+[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes pre-configured to find web server log files on popular distributions. Its configuration is at `/etc/netdata/python.d/web_log.conf`, like this:
```yaml
nginx_log:
@@ -42,8 +58,8 @@ apache_log:
observium : 'observium'
```
-Theodule has preconfigured jobs for nginx, apache and gunicorn on various distros.
-You can add one such section, for each of your web server log files.
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+You can add one such section for each of your web server log files.
> **Important**<br/>Keep in mind [**netdata**](https://my-netdata.io/) runs as user `netdata`. So, make sure user `netdata` has access to the logs directory and can read the log file.
@@ -51,7 +67,7 @@ You can add one such section, for each of your web server log files.
Once you have all log files configured and [**netdata**](https://my-netdata.io/) restarted, **for each log file** you will get a section at the [**netdata**](https://my-netdata.io/) dashboard, with the following charts.
-### responses by status
+### Responses by status
In this chart we tried to provide a meaningful status for all responses. So:
@@ -98,13 +114,13 @@ Here we show all the response codes in detail.
Number of responses for each response code family individually (requests/s)
-### bandwidth
+### Bandwidth
This is a nice view of the traffic the web server is receiving and is sending.
What is important to know for this chart is that the bandwidth used for each request and response is accounted at the time the log is written. Since [**netdata**](https://my-netdata.io/) refreshes this chart every single second, you may have unrealistic spikes if the size of the requests or responses is too big. The reason is simple: a response may have needed 1 minute to be completed, but all the bandwidth used during that minute for the specific response will be accounted at the second the log line is written.
-As the legend on the chart suggests, you can use FireQoS to setup QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](../../tc.plugin/#tcplugin)...
+As the legend on the chart suggests, you can use FireQoS to setup QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](/collectors/tc.plugin/README.md#tcplugin)...
**Bandwidth** KB/s
@@ -115,7 +131,7 @@ As the legend on the chart suggests, you can use FireQoS to setup QoS on the web
> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will be always zero.
-### timings
+### Timings
[**netdata**](https://my-netdata.io/) will also render the `minimum`, `average` and `maximum` time the web server needed to respond to requests.
@@ -185,7 +201,7 @@ The last charts are about the unique IPs accessing your web server.
## Alarms
-The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the [following alarms](../../../health/health.d/web_log.conf) to all `web_log` charts (i.e. to all log files configured, individually):
+The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the following alarms to all `web_log` charts (i.e. to all log files configured, individually):
| alarm|description|minimum<br/>requests|warning|critical|
|:----|-----------|:------------------:|:-----:|:------:|
@@ -198,6 +214,6 @@ The magic of [**netdata**](https://my-netdata.io/) is that all metrics are colle
The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false positives).
-[**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found under directory `health/health.d` of the [Netdata GitHub repository](https://github.com/netdata/netdata/). So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
+Netdata alarms are user-configurable. Sample config files can be found in the `health/health.d` directory of the [Netdata GitHub repository](https://github.com/netdata/netdata/).
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index c1e1dcfbb..04ecadec8 100644
--- a/collectors/python.d.plugin/web_log/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -23,7 +23,6 @@ except ImportError:
from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
-
ORDER_APACHE_CACHE = [
'apache_cache',
]
@@ -821,8 +820,8 @@ class Web:
dim_id = match_dict['vhost'].replace('.', '_')
if dim_id not in self.data:
self.charts['vhost'].add_dimension([dim_id,
- match_dict['vhost'],
- 'incremental'])
+ match_dict['vhost'],
+ 'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
@@ -961,9 +960,9 @@ class Squid:
return False
self.storage['dynamic'] = {
'http_code': {
- 'chart': 'squid_detailed_response_codes',
- 'func_dim_id': None,
- 'func_dim': None
+ 'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None
},
'hier_code': {
'chart': 'squid_hier_code',
@@ -1105,7 +1104,7 @@ def get_hist(index, buckets, time):
:param time: time
:return: None
"""
- for i in range(len(index)-1, -1, -1):
+ for i in range(len(index) - 1, -1, -1):
if time <= index[i]:
buckets[i] += 1
else:
diff --git a/collectors/slabinfo.plugin/Makefile.in b/collectors/slabinfo.plugin/Makefile.in
deleted file mode 100644
index 93d45c13e..000000000
--- a/collectors/slabinfo.plugin/Makefile.in
+++ /dev/null
@@ -1,543 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/slabinfo.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- slabinfo.plugin \
- $(NULL)
-
-SUFFIXES = .in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/slabinfo.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/slabinfo.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md
index 444cd8e38..21d83c999 100644
--- a/collectors/slabinfo.plugin/README.md
+++ b/collectors/slabinfo.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "slabinfo.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/slabinfo.plugin/README.md
+-->
+
# slabinfo.plugin
SLAB is a cache mechanism used by the Kernel to avoid fragmentation.
diff --git a/collectors/slabinfo.plugin/slabinfo.c b/collectors/slabinfo.plugin/slabinfo.c
index b92bc8ac2..00e0d3913 100644
--- a/collectors/slabinfo.plugin/slabinfo.c
+++ b/collectors/slabinfo.plugin/slabinfo.c
@@ -163,12 +163,12 @@ struct slabinfo *read_file_slabinfo() {
slabdebug("-> Reading procfile %s", PLUGIN_SLABINFO_PROCFILE);
static procfile *ff = NULL;
- static long slab_pagesize = 0;
+ static long slab_pagesize = 0;
- if (unlikely(!slab_pagesize)) {
- slab_pagesize = sysconf(_SC_PAGESIZE);
- slabdebug(" Discovered pagesize: %ld", slab_pagesize);
- }
+ if (unlikely(!slab_pagesize)) {
+ slab_pagesize = sysconf(_SC_PAGESIZE);
+ slabdebug(" Discovered pagesize: %ld", slab_pagesize);
+ }
if(unlikely(!ff)) {
ff = procfile_reopen(ff, PLUGIN_SLABINFO_PROCFILE, " ,:" , PROCFILE_FLAG_DEFAULT);
@@ -191,7 +191,7 @@ struct slabinfo *read_file_slabinfo() {
slabdebug(" Read %lu lines from procfile", (unsigned long)lines);
for(l = 2; l < lines; l++) {
if (unlikely(procfile_linewords(ff, l) < 14)) {
- slabdebug(" Line %lu has only %lu words, skipping", (unsigned long)l, procfile_linewords(ff,l));
+ slabdebug(" Line %zu has only %zu words, skipping", l, procfile_linewords(ff,l));
continue;
}
@@ -231,7 +231,7 @@ struct slabinfo *read_file_slabinfo() {
else
s->obj_filling = 0;
- slabdebug(" Updated slab %s: %lu %lu %lu %lu %lu / %lu %lu %lu / %lu %lu %lu / %lu %lu %hhu",
+ slabdebug(" Updated slab %s: %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %hhu",
name, s->active_objs, s->num_objs, s->obj_size, s->obj_per_slab, s->pages_per_slab,
s->tune_limit, s->tune_batchcnt, s->tune_shared_factor,
s->data_active_slabs, s->data_num_slabs, s->data_shared_avail,
@@ -304,7 +304,7 @@ unsigned int do_slab_stats(int update_every) {
, "slabmemory"
);
for (s = sactive; s; s = s->next) {
- printf("SET %s = %lu\n"
+ printf("SET %s = %"PRIu64"\n"
, s->name
, s->mem_usage
);
@@ -334,7 +334,7 @@ unsigned int do_slab_stats(int update_every) {
, "slabwaste"
);
for (s = sactive; s; s = s->next) {
- printf("SET %s = %lu\n"
+ printf("SET %s = %"PRIu64"\n"
, s->name
, s->mem_waste
);
diff --git a/collectors/statsd.plugin/.keep b/collectors/statsd.plugin/.keep
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/statsd.plugin/.keep
+++ /dev/null
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
index 87b6ca7a9..b01302d16 100644
--- a/collectors/statsd.plugin/Makefile.am
+++ b/collectors/statsd.plugin/Makefile.am
@@ -14,7 +14,6 @@ dist_statsdconfig_DATA = \
userstatsdconfigdir=$(configdir)/statsd.d
dist_userstatsdconfig_DATA = \
- .keep \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in
deleted file mode 100644
index bbaf6534b..000000000
--- a/collectors/statsd.plugin/Makefile.in
+++ /dev/null
@@ -1,614 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/statsd.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(dist_statsdconfig_DATA) $(dist_userstatsdconfig_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \
- "$(DESTDIR)$(userstatsdconfigdir)"
-DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \
- $(dist_userstatsdconfig_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-statsdconfigdir = $(libconfigdir)/statsd.d
-dist_statsdconfig_DATA = \
- example.conf \
- $(NULL)
-
-userstatsdconfigdir = $(configdir)/statsd.d
-dist_userstatsdconfig_DATA = \
- .keep \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_statsdconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir)
-install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \
- done
-
-uninstall-dist_userstatsdconfigDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_statsdconfigDATA \
- install-dist_userstatsdconfigDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am: install-exec-local
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_statsdconfigDATA \
- uninstall-dist_userstatsdconfigDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_statsdconfigDATA \
- install-dist_userstatsdconfigDATA install-dvi install-dvi-am \
- install-exec install-exec-am install-exec-local install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_statsdconfigDATA \
- uninstall-dist_userstatsdconfigDATA
-
-.PRECIOUS: Makefile
-
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userstatsdconfigdir)
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
index dcbae6c5e..332b60e73 100644
--- a/collectors/statsd.plugin/README.md
+++ b/collectors/statsd.plugin/README.md
@@ -1,3 +1,9 @@
+<!--
+title: "statsd.plugin"
+description: "The Netdata Agent is a fully-featured statsd server that collects metrics from any custom application and visualizes them in real-time."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/statsd.plugin/README.md
+-->
+
# statsd.plugin
statsd is a system to collect data from any application. Applications send metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
@@ -32,7 +38,7 @@ Netdata fully supports the statsd protocol. All statsd client libraries can be u
`:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
- For counters use `|c` (esty/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
+ For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
Sampling rate is supported (check below).
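As a rough illustration of this wire format (a sketch under assumptions: statsd listening on the default `localhost:8125` UDP port, and `nc` installed):

```bash
# one counter sample and one gauge sample, one datagram each
printf 'myapp.requests:1|c' | nc -u -w 1 localhost 8125
printf 'myapp.queue.size:42|g' | nc -u -w 1 localhost 8125

# a bare name is parsed as name:1|m (a meter)
printf 'myapp.ticks' | nc -u -w 1 localhost 8125
```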
@@ -212,7 +218,8 @@ The same chart with the `sum` unselected:
### synthetic statsd charts
-Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc.
+Use synthetic charts to create dedicated sections on the dashboard to render the charts, with control over the main
+menu, the submenus, the charts, the dimensions on each chart, and more.
Synthetic charts are organized in
@@ -222,7 +229,7 @@ Synthetic charts are organized in
For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
-So, to create the statsd application `myapp`, you can create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
+So, to create the statsd application `myapp`, create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
```
[app]
@@ -264,9 +271,9 @@ Using the above configuration `myapp` should get its own section on the dashboar
`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary, dimension names can be declared globally for each app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
-Then, you can add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
+Then, add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
-You can add any number of metrics to a chart, using `dimension` lines. These lines accept 5 space separated parameters:
+Add any number of metrics to a chart, using `dimension` lines. These lines accept 5 space separated parameters:
1. the metric name, as it is collected (it has to be matched by the `metrics =` pattern of the app)
2. the dimension name, as it should be shown on the chart
@@ -283,7 +290,7 @@ dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
`pattern` is a keyword. When set, `METRIC` is expected to be a Netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
-`TYPE`, `MUTLIPLIER`, `DIVIDER` and `OPTIONS` are optional.
+`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
`TYPE` can be:
@@ -344,12 +351,12 @@ Netdata uses this dictionary as follows:
3. If any of the above succeeds, Netdata uses the `value` of the dictionary, to set the name of the dimension. The dimensions will have as ID the original statsd metric name, and as name, the dictionary value.
-So, you can use the dictionary in 2 ways:
+Use the dictionary in 2 ways:
1. set `dimension = myapp.metric1 ''` and have at the dictionary `myapp.metric1 = metric1 name`
2. set `dimension = myapp.metric1 'm1'` and have at the dictionary `m1 = metric1 name`
-In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the 2 as `${myapp.metric1}` or `${metric1 name}`.
+In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms use either of the 2 as `${myapp.metric1}` or `${metric1 name}`.
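+
+As a minimal sketch of the second form (the app, chart and metric names here are illustrative only):
+
+```
+[dictionary]
+    m1 = metric1 name
+
+[app]
+    name = myapp
+    metrics = myapp.*
+
+[mychart]
+    dimension = myapp.metric1 'm1' last 1 1
+```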
> keep in mind that if you add the same statsd metric to a chart multiple times, Netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, Netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
@@ -382,7 +389,7 @@ To add all response codes of `myapp.api.get` to a chart use this:
dimension = pattern 'myapp.api.get.*' '' last 1 1
```
-The above will add dimension named `200`, `400` and `500` (yes, Netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
+The above will add dimensions named `200`, `400` and `500` (yes, Netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). Rename the dimensions with this:
```
[dictionary]
@@ -440,11 +447,12 @@ Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
~~(although this is required for incremental values, Netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all Netdata dimensions).~~
-(statsd metrics do not loose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
+(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
## sending statsd metrics from shell scripts
-You can send/update statsd metrics from shell scripts. You can use this feature, to visualize in Netdata automated jobs you run on your servers.
+Send/update statsd metrics from shell scripts to visualize automated jobs you run on your servers using the Netdata
+Agent.
The command you need to run is:
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
index e8e6301a8..a8f94130a 100644
--- a/collectors/statsd.plugin/statsd.c
+++ b/collectors/statsd.plugin/statsd.c
@@ -22,7 +22,7 @@
#define STATSD_FIRST_PTR_MUTEX_UNLOCK(index) netdata_mutex_unlock(&((index)->first_mutex))
#define STATSD_DICTIONARY_OPTIONS DICTIONARY_FLAG_DEFAULT
#else
-#define STATSD_AVL_TREE avl_tree
+#define STATSD_AVL_TREE avl_tree_type
#define STATSD_AVL_INSERT avl_insert
#define STATSD_AVL_SEARCH avl_search
#define STATSD_AVL_INDEX_INIT { .root = NULL, .compar = statsd_metric_compare }
@@ -698,7 +698,10 @@ static inline size_t statsd_process(char *buffer, size_t size, int require_newli
s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '|');
if(name == name_end) {
- s = statsd_parse_skip_spaces(s);
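+            // empty metric name (stray ':' or '|'): step over the delimiter, if any, so the parser always advances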
+ if (*s) {
+ s++;
+ s = statsd_parse_skip_spaces(s);
+ }
continue;
}
@@ -1324,7 +1327,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA
else if (!strcmp(name, "dimension")) {
// metric [name [type [multiplier [divisor]]]]
char *words[10];
- pluginsd_split_words(value, words, 10);
+ pluginsd_split_words(value, words, 10, NULL, NULL, 0);
int pattern = 0;
size_t i = 0;
diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in
deleted file mode 100644
index 118de240c..000000000
--- a/collectors/tc.plugin/Makefile.in
+++ /dev/null
@@ -1,617 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/tc.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
- $(dist_noinst_DATA) $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pluginsdir)"
-SCRIPTS = $(dist_plugins_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/build/subst.inc
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- tc-qos-helper.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_plugins_SCRIPTS = \
- tc-qos-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- tc-qos-helper.sh.in \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc $(am__empty):
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pluginsSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pluginsdir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_pluginsSCRIPTS
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_pluginsSCRIPTS
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_pluginsSCRIPTS install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \
- uninstall-am uninstall-dist_pluginsSCRIPTS
-
-.PRECIOUS: Makefile
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]pluginsdir_POST@#$(pluginsdir)#g' \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- -e 's#[@]cachedir_POST@#$(cachedir)#g' \
- -e 's#[@]registrydir_POST@#$(registrydir)#g' \
- -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
index 6e5306128..480076087 100644
--- a/collectors/tc.plugin/README.md
+++ b/collectors/tc.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "tc.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/tc.plugin/README.md
+-->
+
# tc.plugin
Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** !
@@ -8,9 +13,9 @@ Netdata monitors `tc` QoS classes for all interfaces.
If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/) it will collect interface and class names.
-There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
+There is a [shell helper](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
-The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates.
+The source of the tc plugin is [here](https://raw.githubusercontent.com/netdata/netdata/master/collectors/tc.plugin/plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates.
## Motivation
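
Note the division of labor the hunk above describes: the C plugin owns all parsing and the class state machine, while the helper script only decides which `tc` command to run and tags its output per interface. A minimal sketch of that contract, with illustrative device names (the real helper, shown deleted further down in this diff, adds config loading, FireQOS name lookups and precise loop timing):

    #!/usr/bin/env bash
    # Emit one tagged snapshot per device; netdata's tc plugin
    # parses the raw tc output between the BEGIN/END markers.
    for dev in eth0 eth1; do    # illustrative device names
        echo "BEGIN ${dev}"
        tc -s qdisc show dev "${dev}"
        echo "END ${dev}"
    done
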
@@ -62,7 +67,7 @@ QoS is about 2 features:
When your system is under a DDoS attack, it will get a lot more bandwidth compared to the one it can handle and probably your applications will crash. Setting a limit on the inbound traffic using QoS, will protect your servers (throttle the requests) and depending on the size of the attack may allow your legitimate users to access the server, while the attack is taking place.
- Using QoS together with a [SYNPROXY](../../collectors/proc.plugin/README.md) will provide a great degree of protection against most DDoS attacks. Actually when I wrote that article, a few folks tried to DDoS the Netdata demo site to see in real-time the SYNPROXY operation. They did not do it right, but anyway a great deal of requests reached the Netdata server. What saved Netdata was QoS. The Netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](../../collectors/proc.plugin/README.md).
+ Using QoS together with a [SYNPROXY](/collectors/proc.plugin/README.md) will provide a great degree of protection against most DDoS attacks. Actually when I wrote that article, a few folks tried to DDoS the Netdata demo site to see in real-time the SYNPROXY operation. They did not do it right, but anyway a great deal of requests reached the Netdata server. What saved Netdata was QoS. The Netdata demo server has QoS installed, so the requests were throttled and the server did not even reach the point of resource starvation. Read about it [here](/collectors/proc.plugin/README.md).
On top of all these, QoS is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers.
@@ -167,7 +172,7 @@ And this is what you are going to get:
## QoS Configuration with tc
-First, setup the tc rules in rc.local using commands to assign different DSCP markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
+First, setup the tc rules in rc.local using commands to assign different QoS markings to different classids. You can see one such example in [github issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example:
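
The tc_cls file is a plain two-column mapping, one classid/name pair per line, with #-prefixed entries skipped (see the read loop in show_tc_cls() in the helper script further down). An illustrative sample, with made-up class ids and names:

    # classid  name
    1:10 web
    1:20 mail
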
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
index 9245b0857..b92450efe 100644
--- a/collectors/tc.plugin/plugin_tc.c
+++ b/collectors/tc.plugin/plugin_tc.c
@@ -81,7 +81,7 @@ struct tc_device {
RRDSET *st_tokens;
RRDSET *st_ctokens;
- avl_tree classes_index;
+ avl_tree_type classes_index;
struct tc_class *classes;
struct tc_class *last_class;
@@ -102,7 +102,7 @@ static int tc_device_compare(void* a, void* b) {
else return strcmp(((struct tc_device *)a)->id, ((struct tc_device *)b)->id);
}
-avl_tree tc_device_root_index = {
+avl_tree_type tc_device_root_index = {
NULL,
tc_device_compare
};
diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh
deleted file mode 100644
index c9e6727a0..000000000
--- a/collectors/tc.plugin/tc-qos-helper.sh
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This script is a helper to allow netdata collect tc data.
-# tc output parsing has been implemented in C, inside netdata
-# This script allows setting names to dimensions.
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-# logging functions
-
-PROGRAM_NAME="$(basename "$0")"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-warning() {
- log WARNING "${@}"
-}
-
-error() {
- log ERROR "${@}"
-}
-
-info() {
- log INFO "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=0
-debug() {
- [ $debug -eq 1 ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# find /var/run/fireqos
-
-# the default
-fireqos_run_dir="/var/run/fireqos"
-
-function realdir() {
- local r
- local t
- r="$1"
- t="$(readlink "$r")"
-
- while [ "$t" ]; do
- r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t")
- t=$(readlink "$r")
- done
-
- dirname "$r"
-}
-
-if [ ! -d "${fireqos_run_dir}" ]; then
-
- # the fireqos executable - we will use it to find its config
- fireqos="$(command -v fireqos 2>/dev/null)"
-
- if [ -n "${fireqos}" ]; then
-
- fireqos_exec_dir="$(realdir "${fireqos}")"
-
- if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then
- LOCALSTATEDIR=
- #shellcheck source=/dev/null
- source "${fireqos_exec_dir}/install.config"
-
- if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then
- fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
- else
- warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
- fi
- else
- warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
- fi
- else
- warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
- fi
-fi
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/etc/netdata"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/lib/netdata/conf.d"
-
-plugins_dir="${NETDATA_PLUGINS_DIR}"
-tc="$(command -v tc 2>/dev/null)"
-
-# -----------------------------------------------------------------------------
-# user configuration
-
-# time in seconds to refresh QoS class/qdisc names
-qos_get_class_names_every=120
-
-# time in seconds to exit - netdata will restart the script
-qos_exit_every=3600
-
-# what to use? classes or qdiscs?
-tc_show="qdisc" # can also be "class"
-
-# -----------------------------------------------------------------------------
-# check if we have a valid number for interval
-
-t=${1}
-update_every=$((t))
-[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
-[ $((update_every)) -lt 1 ] && update_every=1
-
-# -----------------------------------------------------------------------------
-# allow the user to override our defaults
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do
- if [ -f "${CONFIG}" ]; then
- info "Loading config file '${CONFIG}'..."
- #shellcheck source=/dev/null
- source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-case "${tc_show}" in
-qdisc | class) ;;
-
-*)
- error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
- tc_show="qdisc"
- ;;
-esac
-
-# -----------------------------------------------------------------------------
-# default sleep function
-
-LOOPSLEEPMS_LASTWORK=0
-loopsleepms() {
- sleep "$1"
-}
-
-# if found and included, this file overwrites loopsleepms()
-# with a high resolution timer function for precise looping.
-#shellcheck source=/dev/null
-. "${plugins_dir}/loopsleepms.sh.inc"
-
-# -----------------------------------------------------------------------------
-# final checks we can run
-
-if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then
- fatal "cannot find command 'tc' in this system."
-fi
-
-tc_devices=
-fix_names=
-
-# -----------------------------------------------------------------------------
-
-setclassname() {
- if [ "${tc_show}" = "qdisc" ]; then
- echo "SETCLASSNAME $4 $2"
- else
- echo "SETCLASSNAME $3 $2"
- fi
-}
-
-show_tc_cls() {
- [ "${tc_show}" = "qdisc" ] && return 1
-
- local x="${1}"
-
- if [ -f /etc/iproute2/tc_cls ]; then
- local classid name rest
- while read -r classid name rest; do
- if [ -z "${classid}" ] ||
- [ -z "${name}" ] ||
- [ "${classid}" = "#" ] ||
- [ "${name}" = "#" ] ||
- [ "${classid:0:1}" = "#" ] ||
- [ "${name:0:1}" = "#" ]; then
- continue
- fi
- setclassname "" "${name}" "${classid}"
- done </etc/iproute2/tc_cls
- return 0
- fi
- return 1
-}
-
-show_fireqos_names() {
- local x="${1}" name n interface_dev interface_classes_monitor
-
- if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
- name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}"
-
- #shellcheck source=/dev/null
- source "${fireqos_run_dir}/${name}.conf"
- for n in ${interface_classes_monitor}; do
- # shellcheck disable=SC2086
- setclassname ${n//|/ }
- done
- [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
-
- return 0
- fi
-
- return 1
-}
-
-show_tc() {
- local x="${1}"
-
- echo "BEGIN ${x}"
-
- # netdata can parse the output of tc
- ${tc} -s ${tc_show} show dev "${x}"
-
- # check FireQOS names for classes
- if [ -n "${fix_names}" ]; then
- show_fireqos_names "${x}" || show_tc_cls "${x}"
- fi
-
- echo "END ${x}"
-}
-
-find_tc_devices() {
- local count=0 devs dev rest l
-
- # find all the devices in the system
- # without forking
- while IFS=":| " read -r dev rest; do
- count=$((count + 1))
- [ ${count} -le 2 ] && continue
- devs="${devs} ${dev}"
- done </proc/net/dev
-
- # from all the devices find the ones
- # that have QoS defined
- # unfortunately, one fork per device cannot be avoided
- tc_devices=
- for dev in ${devs}; do
- l="$(${tc} class show dev "${dev}" 2>/dev/null)"
- [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}"
- done
-}
-
-# update devices and class names
-# once every 2 minutes
-names_every=$((qos_get_class_names_every / update_every))
-
-# exit this script every hour
-# it will be restarted automatically
-exit_after=$((qos_exit_every / update_every))
-
-c=0
-gc=0
-while true; do
- fix_names=
- c=$((c + 1))
- gc=$((gc + 1))
-
- if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then
- c=1
- fix_names="YES"
- find_tc_devices
- fi
-
- for d in ${tc_devices}; do
- show_tc "${d}"
- done
-
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
-
- loopsleepms ${update_every}
-
- [ ${gc} -gt ${exit_after} ] && exit 0
-done
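
For reference, the protocol the deleted helper above speaks to the plugin: each iteration prints BEGIN <dev>, the raw output of `tc -s qdisc|class show dev <dev>`, optional SETDEVICENAME / SETDEVICEGROUP / SETCLASSNAME lines resolved from FireQOS state or /etc/iproute2/tc_cls, then END <dev>, and one WORKTIME <ms> line per loop. A trimmed, illustrative iteration for a single device looks like:

    BEGIN eth0
    ... raw 'tc -s qdisc show dev eth0' output ...
    SETDEVICENAME wan    # illustrative FireQOS interface name
    END eth0
    WORKTIME 12
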
diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
index 50555c8b2..65d33153b 100755
--- a/collectors/tc.plugin/tc-qos-helper.sh.in
+++ b/collectors/tc.plugin/tc-qos-helper.sh.in
@@ -211,7 +211,7 @@ show_fireqos_names() {
if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}"
+ echo "SETDEVICENAME ${name}" || exit
#shellcheck source=/dev/null
source "${fireqos_run_dir}/${name}.conf"
@@ -219,7 +219,7 @@ show_fireqos_names() {
# shellcheck disable=SC2086
setclassname ${n//|/ }
done
- [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}"
+ [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" || exit
return 0
fi
@@ -230,7 +230,7 @@ show_fireqos_names() {
show_tc() {
local x="${1}"
- echo "BEGIN ${x}"
+ echo "BEGIN ${x}" || exit
# netdata can parse the output of tc
${tc} -s ${tc_show} show dev "${x}"
@@ -240,7 +240,7 @@ show_tc() {
show_fireqos_names "${x}" || show_tc_cls "${x}"
fi
- echo "END ${x}"
+ echo "END ${x}" || exit
}
find_tc_devices() {
@@ -289,7 +289,7 @@ while true; do
show_tc "${d}"
done
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}"
+ echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" || exit
loopsleepms ${update_every}
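
The pattern added throughout these hunks, `echo "..." || exit`, makes the helper terminate as soon as a write to stdout fails, which is what happens once the netdata daemon on the other end of the pipe goes away; without it, the loop would keep running until the hourly exit_after counter expired. The semantics are plain shell: echo returns non-zero when the write fails, and || then runs exit. One subtlety worth flagging on the SETDEVICEGROUP line: in `[ -n "$x" ] && echo ... || exit`, the || exit also fires when the test itself fails, because A && B || C runs C whenever A or B fails, so an empty interface_dev now ends the script rather than merely skipping the line. A two-line demonstration:

    # A && B || C runs C when either A or B fails:
    [ -n "" ] && echo "never printed" || echo "C runs: the test failed"
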
diff --git a/collectors/xenstat.plugin/Makefile.in b/collectors/xenstat.plugin/Makefile.in
deleted file mode 100644
index 350a140ba..000000000
--- a/collectors/xenstat.plugin/Makefile.in
+++ /dev/null
@@ -1,519 +0,0 @@
-# Makefile.in generated by automake 1.15.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2017 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = { \
- if test -z '$(MAKELEVEL)'; then \
- false; \
- elif test -n '$(MAKE_HOST)'; then \
- true; \
- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
- true; \
- else \
- false; \
- fi; \
-}
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/xenstat.plugin
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
- $(top_srcdir)/build/m4/ax_c__generic.m4 \
- $(top_srcdir)/build/m4/ax_c_lto.m4 \
- $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/build/m4/ax_pthread.m4 \
- $(top_srcdir)/build/m4/jemalloc.m4 \
- $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_DATA) \
- $(am__DIST_COMMON)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-am__DIST_COMMON = $(srcdir)/Makefile.in
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CMOCKA_CFLAGS = @CMOCKA_CFLAGS@
-CMOCKA_LIBS = @CMOCKA_LIBS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CUPSCONFIG = @CUPSCONFIG@
-CXX = @CXX@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_BINARY = @CXX_BINARY@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-ENABLE_UNITTESTS = @ENABLE_UNITTESTS@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-JSON_CFLAGS = @JSON_CFLAGS@
-JSON_LIBS = @JSON_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBCRYPTO_CFLAGS = @LIBCRYPTO_CFLAGS@
-LIBCRYPTO_LIBS = @LIBCRYPTO_LIBS@
-LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
-LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBMONGOC_CFLAGS = @LIBMONGOC_CFLAGS@
-LIBMONGOC_LIBS = @LIBMONGOC_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBSSL_CFLAGS = @LIBSSL_CFLAGS@
-LIBSSL_LIBS = @LIBSSL_LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_CUPS_CFLAGS = @OPTIONAL_CUPS_CFLAGS@
-OPTIONAL_CUPS_LIBS = @OPTIONAL_CUPS_LIBS@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_JSONC_LIBS = @OPTIONAL_JSONC_LIBS@
-OPTIONAL_JUDY_LIBS = @OPTIONAL_JUDY_LIBS@
-OPTIONAL_KINESIS_CFLAGS = @OPTIONAL_KINESIS_CFLAGS@
-OPTIONAL_KINESIS_LIBS = @OPTIONAL_KINESIS_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_LZ4_LIBS = @OPTIONAL_LZ4_LIBS@
-OPTIONAL_MATH_CFLAGS = @OPTIONAL_MATH_CFLAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_MONGOC_CFLAGS = @OPTIONAL_MONGOC_CFLAGS@
-OPTIONAL_MONGOC_LIBS = @OPTIONAL_MONGOC_LIBS@
-OPTIONAL_NFACCT_CFLAGS = @OPTIONAL_NFACCT_CFLAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS@
-OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS = @OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS@
-OPTIONAL_SSL_LIBS = @OPTIONAL_SSL_LIBS@
-OPTIONAL_UUID_CFLAGS = @OPTIONAL_UUID_CFLAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_UV_LIBS = @OPTIONAL_UV_LIBS@
-OPTIONAL_XENSTAT_CFLAGS = @OPTIONAL_XENSTAT_CFLAGS@
-OPTIONAL_XENSTAT_LIBS = @OPTIONAL_XENSTAT_LIBS@
-OPTIONAL_ZLIB_CFLAGS = @OPTIONAL_ZLIB_CFLAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PROTOBUF_CFLAGS = @PROTOBUF_CFLAGS@
-PROTOBUF_LIBS = @PROTOBUF_LIBS@
-PROTOC = @PROTOC@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-TEST_CFLAGS = @TEST_CFLAGS@
-TEST_LIBS = @TEST_LIBS@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-XENLIGHT_CFLAGS = @XENLIGHT_CFLAGS@
-XENLIGHT_LIBS = @XENLIGHT_LIBS@
-YAJL_CFLAGS = @YAJL_CFLAGS@
-YAJL_LIBS = @YAJL_LIBS@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_target = @build_target@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libconfigdir = @libconfigdir@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-runstatedir = @runstatedir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/xenstat.plugin/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu collectors/xenstat.plugin/Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(DATA)
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
- pdf-am ps ps-am tags-am uninstall uninstall-am
-
-.PRECIOUS: Makefile
-
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md
index 4fa047e31..61be271d9 100644
--- a/collectors/xenstat.plugin/README.md
+++ b/collectors/xenstat.plugin/README.md
@@ -1,3 +1,8 @@
+<!--
+title: "xenstat.plugin"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/xenstat.plugin/README.md
+-->
+
# xenstat.plugin
`xenstat.plugin` collects XenServer and XCP-ng statistics.
diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c
index fadd218c5..647ac1db7 100644
--- a/collectors/xenstat.plugin/xenstat_plugin.c
+++ b/collectors/xenstat.plugin/xenstat_plugin.c
@@ -83,16 +83,6 @@ struct vcpu_metrics {
struct vcpu_metrics *next;
};
-struct tmem_metrics {
- unsigned long long curr_eph_pages;
- unsigned long long succ_eph_gets;
- unsigned long long succ_pers_puts;
- unsigned long long succ_pers_gets;
-
- int pages_chart_generated;
- int operation_chart_generated;
-};
-
struct vbd_metrics {
unsigned int id;
@@ -147,12 +137,12 @@ struct domain_metrics {
unsigned int shutdown;
unsigned int crashed;
unsigned int dying;
+ unsigned int cur_vcpus;
unsigned long long cpu_ns;
unsigned long long cur_mem;
unsigned long long max_mem;
- struct tmem_metrics tmem;
struct vcpu_metrics *vcpu_root;
struct vbd_metrics *vbd_root;
struct network_metrics *network_root;
@@ -170,7 +160,6 @@ struct domain_metrics {
struct node_metrics{
unsigned long long tot_mem;
unsigned long long free_mem;
- long freeable_mb;
int num_domains;
unsigned int num_cpus;
unsigned long long node_cpu_hz;
@@ -259,23 +248,18 @@ static struct domain_metrics *domain_metrics_free(struct domain_metrics *d) {
}
static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
- static unsigned int last_num_vcpus = 0;
unsigned int num_vcpus = 0;
xenstat_vcpu *vcpu = NULL;
struct vcpu_metrics *vcpu_m = NULL, *last_vcpu_m = NULL;
num_vcpus = xenstat_domain_num_vcpus(domain);
- if(unlikely(num_vcpus != last_num_vcpus)) {
- d->num_vcpus_changed = 1;
- last_num_vcpus = num_vcpus;
- }
for(vcpu_m = d->vcpu_root; vcpu_m ; vcpu_m = vcpu_m->next)
vcpu_m->updated = 0;
vcpu_m = d->vcpu_root;
- unsigned int i;
+ unsigned int i, num_online_vcpus=0;
for(i = 0; i < num_vcpus; i++) {
if(unlikely(!vcpu_m)) {
vcpu_m = callocz(1, sizeof(struct vcpu_metrics));
@@ -294,6 +278,7 @@ static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain
}
vcpu_m->online = xenstat_vcpu_online(vcpu);
+ if(likely(vcpu_m->online)) { num_online_vcpus++; }
vcpu_m->ns = xenstat_vcpu_ns(vcpu);
vcpu_m->updated = 1;
@@ -302,6 +287,11 @@ static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain
vcpu_m = vcpu_m->next;
}
+ if(unlikely(num_online_vcpus != d->cur_vcpus)) {
+ d->num_vcpus_changed = 1;
+ d->cur_vcpus = num_online_vcpus;
+ }
+
return 0;
}
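
Two things change in the hunk above. The bookkeeping moves from a function-local static last_num_vcpus, a single counter shared by every domain (so a host running several domains with differing vcpu counts would see num_vcpus_changed flip on every collection pass), to a per-domain d->cur_vcpus field. And the comparison now counts only vcpus that xenstat_vcpu_online() reports as online, so the charts are regenerated when vcpus are brought online or offline, not just when the configured total changes.
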
@@ -415,7 +405,6 @@ static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominf
node_metrics.tot_mem = xenstat_node_tot_mem(node);
node_metrics.free_mem = xenstat_node_free_mem(node);
- node_metrics.freeable_mb = xenstat_node_freeable_mb(node);
node_metrics.num_domains = xenstat_node_num_domains(node);
node_metrics.num_cpus = xenstat_node_num_cpus(node);
node_metrics.node_cpu_hz = xenstat_node_cpu_hz(node);
@@ -457,12 +446,6 @@ static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominf
d->cur_mem = xenstat_domain_cur_mem(domain);
d->max_mem = xenstat_domain_max_mem(domain);
- xenstat_tmem *tmem = xenstat_domain_tmem(domain);
- d->tmem.curr_eph_pages = xenstat_tmem_curr_eph_pages(tmem);
- d->tmem.succ_eph_gets = xenstat_tmem_succ_eph_gets(tmem);
- d->tmem.succ_pers_puts = xenstat_tmem_succ_pers_puts(tmem);
- d->tmem.succ_pers_gets = xenstat_tmem_succ_pers_gets(tmem);
-
if(unlikely(vcpu_metrics_collect(d, domain) || vbd_metrics_collect(d, domain) || network_metrics_collect(d, domain))) {
xenstat_free_node(node);
return 1;
@@ -477,7 +460,7 @@ static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominf
}
static void xenstat_send_node_metrics() {
- static int mem_chart_generated = 0, tmem_chart_generated = 0, domains_chart_generated = 0, cpus_chart_generated = 0, cpu_freq_chart_generated = 0;
+ static int mem_chart_generated = 0, domains_chart_generated = 0, cpus_chart_generated = 0, cpu_freq_chart_generated = 0;
// ----------------------------------------------------------------
@@ -503,25 +486,6 @@ static void xenstat_send_node_metrics() {
// ----------------------------------------------------------------
- if(unlikely(!tmem_chart_generated)) {
- printf("CHART xenstat.tmem '' 'Freeable Transcedent Memory' 'MiB' 'memory' '' line %d %d '' %s\n"
- , NETDATA_CHART_PRIO_XENSTAT_NODE_TMEM
- , netdata_update_every
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION %s '' absolute 1 %d\n", "freeable", netdata_update_every * 1024 * 1024);
- tmem_chart_generated = 1;
- }
-
- printf(
- "BEGIN xenstat.tmem\n"
- "SET freeable = %lld\n"
- "END\n"
- , (collected_number) node_metrics.freeable_mb
- );
-
- // ----------------------------------------------------------------
-
if(unlikely(!domains_chart_generated)) {
printf("CHART xenstat.domains '' 'Number of Domains' 'domains' 'domains' '' line %d %d '' %s\n"
, NETDATA_CHART_PRIO_XENSTAT_NODE_DOMAINS
@@ -617,30 +581,6 @@ static void print_domain_mem_chart_definition(char *type, int obsolete_flag) {
printf("DIMENSION current '' absolute 1 %d\n", netdata_update_every * 1024 * 1024);
}
-static void print_domain_tmem_pages_chart_definition(char *type, int obsolete_flag) {
- printf("CHART %s.tmem_pages '' 'Current Number of Transcedent Memory Ephemeral Pages' 'pages' 'memory' 'xendomain.tmem_pages' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_PAGES
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION pages '' absolute 1 %d\n", netdata_update_every);
-}
-
-static void print_domain_tmem_operations_chart_definition(char *type, int obsolete_flag) {
- printf("CHART %s.tmem_operations '' 'Successful Transcedent Memory Puts and Gets' 'events/s' 'memory' 'xendomain.tmem_operations' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_OPERATIONS
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION ephemeral_gets 'ephemeral gets' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION persistent_puts 'persistent puts' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION persistent_gets 'persistent gets' incremental 1 %d\n", netdata_update_every);
-}
-
static void print_domain_vcpu_chart_definition(char *type, struct domain_metrics *d, int obsolete_flag) {
struct vcpu_metrics *vcpu_m;
@@ -715,7 +655,7 @@ static void print_domain_network_bytes_chart_definition(char *type, unsigned int
}
static void print_domain_network_packets_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.packets_network%u '' 'Network%u Recieved/Sent Packets' 'packets/s' 'network' 'xendomain.packets_network' line %d %d %s %s\n"
+ printf("CHART %s.packets_network%u '' 'Network%u Received/Sent Packets' 'packets/s' 'network' 'xendomain.packets_network' line %d %d %s %s\n"
, type
, network
, network
@@ -743,7 +683,7 @@ static void print_domain_network_errors_chart_definition(char *type, unsigned in
}
static void print_domain_network_drops_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.drops_network%u '' 'Network%u Recieve/Transmit Drops' 'drops/s' 'network' 'xendomain.drops_network' line %d %d %s %s\n"
+ printf("CHART %s.drops_network%u '' 'Network%u Receive/Transmit Drops' 'drops/s' 'network' 'xendomain.drops_network' line %d %d %s %s\n"
, type
, network
, network
@@ -845,38 +785,6 @@ static void xenstat_send_domain_metrics() {
// ----------------------------------------------------------------
- if(unlikely(!d->tmem.pages_chart_generated)) {
- print_domain_tmem_pages_chart_definition(type, CHART_IS_NOT_OBSOLETE);
- d->tmem.pages_chart_generated = 1;
- }
- printf(
- "BEGIN %s.tmem_pages\n"
- "SET pages = %lld\n"
- "END\n"
- , type
- , (collected_number)d->tmem.curr_eph_pages
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!d->tmem.operation_chart_generated)) {
- print_domain_tmem_operations_chart_definition(type, CHART_IS_NOT_OBSOLETE);
- d->tmem.operation_chart_generated = 1;
- }
- printf(
- "BEGIN %s.tmem_operations\n"
- "SET ephemeral_gets = %lld\n"
- "SET persistent_puts = %lld\n"
- "SET persistent_gets = %lld\n"
- "END\n"
- , type
- , (collected_number)d->tmem.succ_eph_gets
- , (collected_number)d->tmem.succ_pers_puts
- , (collected_number)d->tmem.succ_eph_gets
- );
-
- // ----------------------------------------------------------------
-
struct vbd_metrics *vbd_m;
for(vbd_m = d->vbd_root; vbd_m; vbd_m = vbd_m->next) {
if(likely(vbd_m->updated && !vbd_m->error)) {
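
Worth noting as it goes: the removed tmem block above also carried a copy-paste bug. The persistent_gets dimension was fed d->tmem.succ_eph_gets instead of d->tmem.succ_pers_gets, so the collected succ_pers_gets value was never actually reported.
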
@@ -953,7 +861,7 @@ static void xenstat_send_domain_metrics() {
}
printf(
"BEGIN %s.bytes_network%u\n"
- "SET recieved = %lld\n"
+ "SET received = %lld\n"
"SET sent = %lld\n"
"END\n"
, type
@@ -970,7 +878,7 @@ static void xenstat_send_domain_metrics() {
}
printf(
"BEGIN %s.packets_network%u\n"
- "SET recieved = %lld\n"
+ "SET received = %lld\n"
"SET sent = %lld\n"
"END\n"
, type
@@ -987,7 +895,7 @@ static void xenstat_send_domain_metrics() {
}
printf(
"BEGIN %s.errors_network%u\n"
- "SET recieved = %lld\n"
+ "SET received = %lld\n"
"SET sent = %lld\n"
"END\n"
, type
@@ -1004,7 +912,7 @@ static void xenstat_send_domain_metrics() {
}
printf(
"BEGIN %s.drops_network%u\n"
- "SET recieved = %lld\n"
+ "SET received = %lld\n"
"SET sent = %lld\n"
"END\n"
, type
@@ -1036,8 +944,6 @@ static void xenstat_send_domain_metrics() {
print_domain_cpu_chart_definition(type, CHART_IS_OBSOLETE);
print_domain_vcpu_chart_definition(type, d, CHART_IS_OBSOLETE);
print_domain_mem_chart_definition(type, CHART_IS_OBSOLETE);
- print_domain_tmem_pages_chart_definition(type, CHART_IS_OBSOLETE);
- print_domain_tmem_operations_chart_definition(type, CHART_IS_OBSOLETE);
d = domain_metrics_free(d);
}