From 8a7b72f7cd1ccd547a03eb4243294e741d661d3f Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 8 Feb 2019 08:30:37 +0100
Subject: Adding upstream version 1.12.0.

Signed-off-by: Daniel Baumann
---
 .codacy.yml | 2 +
 .github/CODEOWNERS | 25 +-
 .github/ISSUE_TEMPLATE.md | 10 +
 .github/ISSUE_TEMPLATE/bug_report.md | 34 +
 .github/ISSUE_TEMPLATE/feature_request.md | 16 +
 .github/ISSUE_TEMPLATE/question.md | 25 +
 .github/PULL_REQUEST_TEMPLATE.md | 19 +
 .github/stale.yml | 19 +
 .gitignore | 26 +-
 .travis.yml | 85 +
 .travis/README.md | 27 +-
 .travis/containerized_build.sh | 7 -
 .travis/create_artifacts.sh | 37 +-
 .travis/gcs-credentials.json.enc | Bin 0 -> 2320 bytes
 .travis/generate_changelog.sh | 33 +-
 .travis/images/Dockerfile.alpine | 5 -
 .travis/images/Dockerfile.centos6 | 5 -
 .travis/images/Dockerfile.centos7 | 5 -
 .travis/images/Dockerfile.ubuntu1804 | 6 -
 .travis/labeler.sh | 105 +-
 .travis/nightlies.sh | 45 +
 .travis/releaser.sh | 40 +-
 .travis/tagger.sh | 42 +-
 CHANGELOG.md | 835 ++
 CMakeLists.txt | 575 ++
 CODE_OF_CONDUCT.md | 2 +
 CONTRIBUTING.md | 56 +-
 CONTRIBUTORS.md | 3 +
 HISTORICAL_CHANGELOG.md | 655 ++
 Makefile.am | 79 +-
 Makefile.in | 3036 ------
 README.md | 81 +-
 REDISTRIBUTED.md | 21 +-
 aclocal.m4 | 1355 ---
 backends/Makefile.in | 658 --
 backends/README.md | 33 +-
 backends/WALKTHROUGH.md | 4 +-
 backends/backends.c | 2 +-
 backends/graphite/Makefile.in | 457 -
 backends/json/Makefile.in | 457 -
 backends/opentsdb/Makefile.in | 457 -
 backends/prometheus/Makefile.in | 464 -
 backends/prometheus/README.md | 4 +-
 backends/prometheus/backend_prometheus.c | 16 +-
 build/Dockerfile | 8 -
 build/build.sh | 12 +-
 build/subst.inc | 4 +-
 collectors/Makefile.am | 1 +
 collectors/Makefile.in | 663 --
 collectors/README.md | 5 +-
 collectors/all.h | 17 +
 collectors/apps.plugin/Makefile.in | 521 --
 collectors/apps.plugin/README.md | 15 +-
 collectors/apps.plugin/apps_groups.conf | 5 +-
 collectors/apps.plugin/apps_plugin.c | 133 +-
 collectors/cgroups.plugin/Makefile.in | 563 --
 collectors/cgroups.plugin/README.md | 15 +-
 collectors/cgroups.plugin/cgroup-name.sh | 196 -
 collectors/cgroups.plugin/cgroup-name.sh.in | 258 +-
 collectors/cgroups.plugin/cgroup-network.c | 7 +
 collectors/cgroups.plugin/sys_fs_cgroup.c | 44 +-
 collectors/charts.d.plugin/.keep | 0
 collectors/charts.d.plugin/Makefile.am | 3 +-
 collectors/charts.d.plugin/Makefile.in | 953 --
 collectors/charts.d.plugin/README.md | 2 +
 collectors/charts.d.plugin/ap/README.md | 2 +
 collectors/charts.d.plugin/ap/ap.chart.sh | 91 +-
 collectors/charts.d.plugin/apache/README.md | 8 +-
 collectors/charts.d.plugin/apache/apache.chart.sh | 79 +-
 collectors/charts.d.plugin/apcupsd/README.md | 7 +
 .../charts.d.plugin/apcupsd/apcupsd.chart.sh | 189 +-
 .../charts.d.plugin/charts.d.dryrun-helper.sh | 58 +-
 collectors/charts.d.plugin/charts.d.plugin | 743 --
 collectors/charts.d.plugin/charts.d.plugin.in | 924 +-
 collectors/charts.d.plugin/cpu_apps/README.md | 6 +-
 .../charts.d.plugin/cpu_apps/cpu_apps.chart.sh | 20 +-
 collectors/charts.d.plugin/cpufreq/README.md | 6 +-
 .../charts.d.plugin/cpufreq/cpufreq.chart.sh | 18 +-
 collectors/charts.d.plugin/example/README.md | 4 +
 .../charts.d.plugin/example/example.chart.sh | 15 +-
 collectors/charts.d.plugin/exim/README.md | 6 +-
 collectors/charts.d.plugin/exim/exim.chart.sh | 26 +-
 collectors/charts.d.plugin/hddtemp/README.md | 8 +-
 .../charts.d.plugin/hddtemp/hddtemp.chart.sh | 24 +-
 collectors/charts.d.plugin/libreswan/README.md | 2 +
 .../charts.d.plugin/libreswan/libreswan.chart.sh | 32 +-
 collectors/charts.d.plugin/load_average/README.md | 4 +
 .../load_average/load_average.chart.sh | 14 +-
 collectors/charts.d.plugin/loopsleepms.sh.inc | 282 +-
 collectors/charts.d.plugin/mem_apps/README.md | 6 +-
 .../charts.d.plugin/mem_apps/mem_apps.chart.sh | 13 +-
 collectors/charts.d.plugin/mysql/README.md | 8 +-
 collectors/charts.d.plugin/mysql/mysql.chart.sh | 71 +-
 collectors/charts.d.plugin/nginx/README.md | 6 +-
 collectors/charts.d.plugin/nginx/nginx.chart.sh | 37 +-
 collectors/charts.d.plugin/nut/README.md | 2 +
 collectors/charts.d.plugin/nut/nut.chart.sh | 35 +-
 collectors/charts.d.plugin/opensips/README.md | 7 +
 .../charts.d.plugin/opensips/opensips.chart.sh | 10 +-
 collectors/charts.d.plugin/phpfpm/README.md | 6 +-
 collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh | 49 +-
 collectors/charts.d.plugin/postfix/README.md | 8 +-
 .../charts.d.plugin/postfix/postfix.chart.sh | 14 +-
 collectors/charts.d.plugin/sensors/README.md | 7 +-
 .../charts.d.plugin/sensors/sensors.chart.sh | 195 +-
 collectors/charts.d.plugin/squid/README.md | 9 +-
 collectors/charts.d.plugin/squid/squid.chart.sh | 20 +-
 collectors/charts.d.plugin/tomcat/README.md | 6 +-
 collectors/charts.d.plugin/tomcat/tomcat.chart.sh | 26 +-
 collectors/checks.plugin/Makefile.in | 464 -
 collectors/checks.plugin/README.md | 4 +-
 collectors/cups.plugin/Makefile.am | 9 +
 collectors/cups.plugin/README.md | 49 +
 collectors/cups.plugin/cups_plugin.c | 449 +
 collectors/diskspace.plugin/Makefile.in | 464 -
 collectors/diskspace.plugin/README.md | 29 +
 collectors/diskspace.plugin/plugin_diskspace.c | 6 +-
 collectors/fping.plugin/Makefile.in | 591 --
 collectors/fping.plugin/README.md | 2 +
 collectors/fping.plugin/fping.plugin | 200 -
 collectors/freebsd.plugin/Makefile.in | 464 -
 collectors/freebsd.plugin/README.md | 4 +-
 collectors/freebsd.plugin/freebsd_devstat.c | 50 +-
 collectors/freebsd.plugin/freebsd_getifaddrs.c | 52 +-
 collectors/freebsd.plugin/freebsd_getmntinfo.c | 12 +-
 collectors/freebsd.plugin/freebsd_ipfw.c | 8 +-
 collectors/freebsd.plugin/freebsd_kstat_zfs.c | 112 +-
 collectors/freebsd.plugin/freebsd_sysctl.c | 60 +-
 collectors/freeipmi.plugin/Makefile.in | 464 -
 collectors/freeipmi.plugin/README.md | 14 +-
 collectors/freeipmi.plugin/freeipmi_plugin.c | 44 +-
 collectors/idlejitter.plugin/Makefile.in | 464 -
 collectors/idlejitter.plugin/README.md | 6 +-
 collectors/macos.plugin/Makefile.in | 464 -
 collectors/macos.plugin/README.md | 4 +-
 collectors/macos.plugin/macos_fw.c | 14 +-
 collectors/macos.plugin/macos_mach_smi.c | 12 +-
 collectors/macos.plugin/macos_sysctl.c | 4 +-
 collectors/nfacct.plugin/Makefile.in | 464 -
 collectors/nfacct.plugin/README.md | 2 +
 collectors/node.d.plugin/.keep | 0
 collectors/node.d.plugin/Makefile.am | 3 +-
 collectors/node.d.plugin/Makefile.in | 805 --
 collectors/node.d.plugin/README.md | 2 +
 collectors/node.d.plugin/fronius/README.md | 2 +
 collectors/node.d.plugin/named/README.md | 2 +
 collectors/node.d.plugin/node.d.plugin | 303 -
 collectors/node.d.plugin/sma_webbox/README.md | 4 +
 .../node.d.plugin/sma_webbox/sma_webbox.node.js | 13 +-
 collectors/node.d.plugin/snmp/README.md | 26 +-
 collectors/node.d.plugin/snmp/snmp.node.js | 4 +-
 collectors/node.d.plugin/stiebeleltron/README.md | 2 +
 collectors/plugins.d/Makefile.in | 647 --
 collectors/plugins.d/README.md | 11 +-
 collectors/proc.plugin/Makefile.in | 464 -
 collectors/proc.plugin/README.md | 112 +-
 collectors/proc.plugin/plugin_proc.c | 4 +
 collectors/proc.plugin/plugin_proc.h | 2 +
 collectors/proc.plugin/proc_diskstats.c | 20 +-
 collectors/proc.plugin/proc_mdstat.c | 641 ++
 collectors/proc.plugin/proc_meminfo.c | 22 +-
 collectors/proc.plugin/proc_net_rpc_nfsd.c | 2 +-
 collectors/proc.plugin/proc_net_sockstat.c | 6 +-
 collectors/proc.plugin/proc_net_stat_synproxy.c | 2 +-
 collectors/proc.plugin/proc_spl_kstat_zfs.c | 43 +-
 collectors/proc.plugin/proc_stat.c | 347 +-
 collectors/proc.plugin/proc_vmstat.c | 6 +-
 collectors/proc.plugin/sys_class_power_supply.c | 383 +
 collectors/proc.plugin/sys_fs_btrfs.c | 8 +-
 collectors/proc.plugin/sys_kernel_mm_ksm.c | 4 +-
 collectors/proc.plugin/zfs_common.c | 104 +-
 collectors/proc.plugin/zfs_common.h | 4 +-
 collectors/python.d.plugin/.keep | 0
 collectors/python.d.plugin/Makefile.am | 3 +-
 collectors/python.d.plugin/Makefile.in | 2025 ----
 collectors/python.d.plugin/README.md | 55 +-
 collectors/python.d.plugin/adaptec_raid/README.md | 2 +
 .../python.d.plugin/adaptec_raid/adaptec_raid.conf | 10 +-
 collectors/python.d.plugin/apache/README.md | 4 +-
 collectors/python.d.plugin/apache/apache.chart.py | 114 +-
 collectors/python.d.plugin/apache/apache.conf | 10 +-
 collectors/python.d.plugin/beanstalk/README.md | 2 +
 .../python.d.plugin/beanstalk/beanstalk.chart.py | 17 +-
 .../python.d.plugin/beanstalk/beanstalk.conf | 10 +-
 collectors/python.d.plugin/bind_rndc/README.md | 2 +
 .../python.d.plugin/bind_rndc/bind_rndc.chart.py | 30 +-
 .../python.d.plugin/bind_rndc/bind_rndc.conf | 10 +-
 collectors/python.d.plugin/boinc/README.md | 2 +
 collectors/python.d.plugin/boinc/boinc.chart.py | 14 +-
 collectors/python.d.plugin/boinc/boinc.conf | 10 +-
 collectors/python.d.plugin/ceph/README.md | 2 +
 collectors/python.d.plugin/ceph/ceph.chart.py | 19 +-
 collectors/python.d.plugin/ceph/ceph.conf | 10 +-
 collectors/python.d.plugin/chrony/README.md | 2 +
 collectors/python.d.plugin/chrony/chrony.chart.py | 16 +-
 collectors/python.d.plugin/chrony/chrony.conf | 10 +-
 collectors/python.d.plugin/couchdb/README.md | 2 +
 .../python.d.plugin/couchdb/couchdb.chart.py | 47 +-
 collectors/python.d.plugin/couchdb/couchdb.conf | 10 +-
 collectors/python.d.plugin/cpufreq/README.md | 7 +
 collectors/python.d.plugin/cpufreq/cpufreq.conf | 8 +-
 collectors/python.d.plugin/cpuidle/README.md | 2 +
 collectors/python.d.plugin/cpuidle/cpuidle.conf | 8 +-
 .../python.d.plugin/dns_query_time/README.md | 2 +
 .../dns_query_time/dns_query_time.chart.py | 21 +-
 .../dns_query_time/dns_query_time.conf | 10 +-
 collectors/python.d.plugin/dnsdist/README.md | 2 +
 .../python.d.plugin/dnsdist/dnsdist.chart.py | 4 +-
 collectors/python.d.plugin/dnsdist/dnsdist.conf | 10 +-
 collectors/python.d.plugin/dockerd/README.md | 4 +-
 .../python.d.plugin/dockerd/dockerd.chart.py | 26 +-
 collectors/python.d.plugin/dockerd/dockerd.conf | 10 +-
 collectors/python.d.plugin/dovecot/README.md | 6 +
 .../python.d.plugin/dovecot/dovecot.chart.py | 25 +-
 collectors/python.d.plugin/dovecot/dovecot.conf | 14 +-
 collectors/python.d.plugin/elasticsearch/README.md | 2 +
 .../elasticsearch/elasticsearch.chart.py | 83 +-
 .../elasticsearch/elasticsearch.conf | 10 +-
 collectors/python.d.plugin/example/README.md | 6 +-
 .../python.d.plugin/example/example.chart.py | 9 +-
 collectors/python.d.plugin/example/example.conf | 10 +-
 collectors/python.d.plugin/exim/README.md | 2 +
 collectors/python.d.plugin/exim/exim.chart.py | 13 +-
 collectors/python.d.plugin/exim/exim.conf | 10 +-
 collectors/python.d.plugin/fail2ban/README.md | 2 +
 .../python.d.plugin/fail2ban/fail2ban.chart.py | 24 +-
 collectors/python.d.plugin/fail2ban/fail2ban.conf | 10 +-
 collectors/python.d.plugin/freeradius/README.md | 2 +
 .../python.d.plugin/freeradius/freeradius.chart.py | 108 +-
 .../python.d.plugin/freeradius/freeradius.conf | 10 +-
 collectors/python.d.plugin/go_expvar/README.md | 3 +-
 .../python.d.plugin/go_expvar/go_expvar.chart.py | 69 +-
 .../python.d.plugin/go_expvar/go_expvar.conf | 10 +-
 collectors/python.d.plugin/haproxy/README.md | 2 +
 .../python.d.plugin/haproxy/haproxy.chart.py | 33 +-
 collectors/python.d.plugin/haproxy/haproxy.conf | 10 +-
 collectors/python.d.plugin/hddtemp/README.md | 2 +
 .../python.d.plugin/hddtemp/hddtemp.chart.py | 9 +-
 collectors/python.d.plugin/hddtemp/hddtemp.conf | 10 +-
 collectors/python.d.plugin/httpcheck/README.md | 2 +
 .../python.d.plugin/httpcheck/httpcheck.chart.py | 13 +-
 .../python.d.plugin/httpcheck/httpcheck.conf | 6 +-
 collectors/python.d.plugin/icecast/README.md | 2 +
 .../python.d.plugin/icecast/icecast.chart.py | 8 +-
 collectors/python.d.plugin/icecast/icecast.conf | 10 +-
 collectors/python.d.plugin/ipfs/README.md | 2 +
 collectors/python.d.plugin/ipfs/ipfs.chart.py | 36 +-
 collectors/python.d.plugin/ipfs/ipfs.conf | 10 +-
 collectors/python.d.plugin/isc_dhcpd/README.md | 2 +
 .../python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py | 30 +-
 .../python.d.plugin/isc_dhcpd/isc_dhcpd.conf | 10 +-
 .../python.d.plugin/linux_power_supply/README.md | 9 +-
 .../linux_power_supply/linux_power_supply.conf | 10 +-
 collectors/python.d.plugin/litespeed/README.md | 2 +
 .../python.d.plugin/litespeed/litespeed.chart.py | 14 +-
 .../python.d.plugin/litespeed/litespeed.conf | 10 +-
 collectors/python.d.plugin/logind/README.md | 2 +
 collectors/python.d.plugin/logind/logind.chart.py | 10 +-
 collectors/python.d.plugin/logind/logind.conf | 10 +-
 collectors/python.d.plugin/mdstat/README.md | 7 +
 collectors/python.d.plugin/mdstat/mdstat.conf | 8 +-
 collectors/python.d.plugin/megacli/README.md | 2 +
 .../python.d.plugin/megacli/megacli.chart.py | 4 +-
 collectors/python.d.plugin/megacli/megacli.conf | 10 +-
 collectors/python.d.plugin/memcached/README.md | 2 +
 .../python.d.plugin/memcached/memcached.chart.py | 48 +-
 .../python.d.plugin/memcached/memcached.conf | 10 +-
 collectors/python.d.plugin/mongodb/README.md | 29 +
 .../python.d.plugin/mongodb/mongodb.chart.py | 22 +-
 collectors/python.d.plugin/mongodb/mongodb.conf | 10 +-
 collectors/python.d.plugin/monit/README.md | 2 +
 collectors/python.d.plugin/monit/monit.chart.py | 26 +-
 collectors/python.d.plugin/monit/monit.conf | 10 +-
 collectors/python.d.plugin/mysql/README.md | 4 +-
 collectors/python.d.plugin/mysql/mysql.chart.py | 121 +-
 collectors/python.d.plugin/mysql/mysql.conf | 11 +-
 collectors/python.d.plugin/nginx/README.md | 3 +-
 collectors/python.d.plugin/nginx/nginx.chart.py | 30 +-
 collectors/python.d.plugin/nginx/nginx.conf | 10 +-
 collectors/python.d.plugin/nginx_plus/README.md | 2 +
 .../python.d.plugin/nginx_plus/nginx_plus.chart.py | 18 +-
 .../python.d.plugin/nginx_plus/nginx_plus.conf | 10 +-
 collectors/python.d.plugin/nsd/README.md | 2 +
 collectors/python.d.plugin/nsd/nsd.chart.py | 30 +-
 collectors/python.d.plugin/nsd/nsd.conf | 10 +-
 collectors/python.d.plugin/ntpd/README.md | 2 +
 collectors/python.d.plugin/ntpd/ntpd.chart.py | 28 +-
 collectors/python.d.plugin/ntpd/ntpd.conf | 10 +-
 collectors/python.d.plugin/nvidia_smi/README.md | 3 +-
 .../python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 29 +-
 .../python.d.plugin/nvidia_smi/nvidia_smi.conf | 10 +-
 collectors/python.d.plugin/openldap/README.md | 2 +
 .../python.d.plugin/openldap/openldap.chart.py | 6 +-
 collectors/python.d.plugin/openldap/openldap.conf | 10 +-
 .../python.d.plugin/ovpn_status_log/README.md | 2 +
 .../ovpn_status_log/ovpn_status_log.chart.py | 24 +-
 .../ovpn_status_log/ovpn_status_log.conf | 10 +-
 collectors/python.d.plugin/phpfpm/README.md | 3 +-
 collectors/python.d.plugin/phpfpm/phpfpm.chart.py | 41 +-
 collectors/python.d.plugin/phpfpm/phpfpm.conf | 10 +-
 collectors/python.d.plugin/portcheck/README.md | 2 +
 .../python.d.plugin/portcheck/portcheck.chart.py | 7 +-
 .../python.d.plugin/portcheck/portcheck.conf | 6 +-
 collectors/python.d.plugin/postfix/README.md | 2 +
 .../python.d.plugin/postfix/postfix.chart.py | 15 +-
 collectors/python.d.plugin/postfix/postfix.conf | 10 +-
 collectors/python.d.plugin/postgres/README.md | 2 +
 .../python.d.plugin/postgres/postgres.chart.py | 699 +-
 collectors/python.d.plugin/postgres/postgres.conf | 22 +-
 collectors/python.d.plugin/powerdns/README.md | 2 +
 .../python.d.plugin/powerdns/powerdns.chart.py | 11 +-
 collectors/python.d.plugin/powerdns/powerdns.conf | 10 +-
 collectors/python.d.plugin/proxysql/README.md | 2 +
 .../python.d.plugin/proxysql/proxysql.chart.py | 13 +-
 collectors/python.d.plugin/proxysql/proxysql.conf | 10 +-
 collectors/python.d.plugin/puppet/README.md | 7 +-
 collectors/python.d.plugin/puppet/puppet.chart.py | 28 +-
 collectors/python.d.plugin/puppet/puppet.conf | 12 +-
 collectors/python.d.plugin/python.d.conf | 6 +
 collectors/python.d.plugin/python.d.plugin | 427 -
 collectors/python.d.plugin/python.d.plugin.in | 4 +-
 .../bases/FrameworkServices/MySQLService.py | 30 +-
 .../bases/FrameworkServices/SimpleService.py | 109 +-
 .../bases/FrameworkServices/SocketService.py | 10 +-
 .../bases/FrameworkServices/UrlService.py | 19 +-
 .../python.d.plugin/python_modules/bases/charts.py | 2 +-
 .../python_modules/bases/loggers.py | 2 +-
 collectors/python.d.plugin/rabbitmq/README.md | 2 +
 .../python.d.plugin/rabbitmq/rabbitmq.chart.py | 122 +-
 collectors/python.d.plugin/rabbitmq/rabbitmq.conf | 10 +-
 collectors/python.d.plugin/redis/README.md | 2 +
 collectors/python.d.plugin/redis/redis.chart.py | 13 +-
 collectors/python.d.plugin/redis/redis.conf | 10 +-
 collectors/python.d.plugin/rethinkdbs/README.md | 2 +
 .../python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 2 -
 .../python.d.plugin/rethinkdbs/rethinkdbs.conf | 10 +-
 collectors/python.d.plugin/retroshare/README.md | 2 +
 .../python.d.plugin/retroshare/retroshare.chart.py | 21 +-
 .../python.d.plugin/retroshare/retroshare.conf | 10 +-
 collectors/python.d.plugin/samba/README.md | 2 +
 collectors/python.d.plugin/samba/samba.chart.py | 7 +-
 collectors/python.d.plugin/samba/samba.conf | 10 +-
 collectors/python.d.plugin/sensors/README.md | 2 +
 .../python.d.plugin/sensors/sensors.chart.py | 4 +-
 collectors/python.d.plugin/sensors/sensors.conf | 8 +-
 collectors/python.d.plugin/smartd_log/README.md | 2 +
 .../python.d.plugin/smartd_log/smartd_log.chart.py | 22 +-
 .../python.d.plugin/smartd_log/smartd_log.conf | 10 +-
 collectors/python.d.plugin/spigotmc/README.md | 2 +
 .../python.d.plugin/spigotmc/spigotmc.chart.py | 5 +-
 collectors/python.d.plugin/spigotmc/spigotmc.conf | 10 +-
 collectors/python.d.plugin/springboot/README.md | 2 +
 .../python.d.plugin/springboot/springboot.chart.py | 15 +-
 .../python.d.plugin/springboot/springboot.conf | 10 +-
 collectors/python.d.plugin/squid/README.md | 2 +
 collectors/python.d.plugin/squid/squid.chart.py | 13 +-
 collectors/python.d.plugin/squid/squid.conf | 10 +-
 collectors/python.d.plugin/tomcat/README.md | 2 +
 collectors/python.d.plugin/tomcat/tomcat.chart.py | 63 +-
 collectors/python.d.plugin/tomcat/tomcat.conf | 10 +-
 collectors/python.d.plugin/tor/README.md | 2 +
 collectors/python.d.plugin/tor/tor.chart.py | 4 +-
 collectors/python.d.plugin/tor/tor.conf | 10 +-
 collectors/python.d.plugin/traefik/README.md | 3 +-
 .../python.d.plugin/traefik/traefik.chart.py | 29 +-
 collectors/python.d.plugin/traefik/traefik.conf | 10 +-
 collectors/python.d.plugin/unbound/README.md | 2 +
 .../python.d.plugin/unbound/unbound.chart.py | 6 +-
 collectors/python.d.plugin/unbound/unbound.conf | 10 +-
 collectors/python.d.plugin/uwsgi/README.md | 2 +
 collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 22 +-
 collectors/python.d.plugin/uwsgi/uwsgi.conf | 10 +-
 collectors/python.d.plugin/varnish/README.md | 10 +-
 .../python.d.plugin/varnish/varnish.chart.py | 34 +-
 collectors/python.d.plugin/varnish/varnish.conf | 14 +-
 collectors/python.d.plugin/w1sensor/README.md | 2 +
 .../python.d.plugin/w1sensor/w1sensor.chart.py | 4 +-
 collectors/python.d.plugin/w1sensor/w1sensor.conf | 10 +-
 collectors/python.d.plugin/web_log/README.md | 4 +-
 .../python.d.plugin/web_log/web_log.chart.py | 6 +-
 collectors/python.d.plugin/web_log/web_log.conf | 10 +-
 collectors/statsd.plugin/.keep | 0
 collectors/statsd.plugin/Makefile.am | 3 +-
 collectors/statsd.plugin/Makefile.in | 556 --
 collectors/statsd.plugin/README.md | 16 +-
 collectors/statsd.plugin/statsd.c | 2 +-
 collectors/tc.plugin/Makefile.in | 562 --
 collectors/tc.plugin/README.md | 65 +-
 collectors/tc.plugin/tc-qos-helper.sh | 315 -
 collectors/tc.plugin/tc-qos-helper.sh.in | 309 +-
 compile | 347 -
 config.guess | 1558 ----
 config.h.in | 329 -
 config.sub | 1791 ----
 configure | 9668 --------------------
 configure.ac | 98 +-
 contrib/Makefile.am | 33 -
 contrib/Makefile.in | 491 -
 contrib/README.md | 2 +
 contrib/debian/changelog | 3 -
 contrib/rhel/build-netdata-rpm.sh | 2 +-
 contrib/sles11/README.md | 11 +
 contrib/sles11/alarm-notify-basic.bash3.sh | 755 ++
 contrib/sles11/netdata-alarms-bash3.patch | 10 +
 contrib/sles11/netdata-automake-no-dist-xz.patch | 13 +
 contrib/sles11/netdata-python-plugin-sles11.patch | 28 +
 contrib/sles11/netdata.init | 65 +
 daemon/Makefile.am | 11 +
 daemon/Makefile.in | 465 -
 daemon/README.md | 36 +-
 daemon/anonymous-statistics.sh.in | 197 +
 daemon/common.h | 1 +
 daemon/config/README.md | 229 +-
 daemon/daemon.h | 1 +
 daemon/global_statistics.c | 2 +-
 daemon/main.c | 71 +-
 daemon/main.h | 1 +
 database/Makefile.in | 464 -
 database/README.md | 4 +-
 database/rrdcalc.h | 2 +
 database/rrdhost.c | 5 +-
 database/rrdsetvar.c | 4 +-
 database/rrdvar.c | 2 +-
 depcomp | 791 --
 diagrams/Makefile.in | 482 -
 diagrams/data_structures/README.md | 2 +
 diagrams/data_structures/netdata_config.svg | 0
 diagrams/data_structures/registry.svg | 0
 diagrams/data_structures/rrd.svg | 0
 diagrams/data_structures/src/netdata_config.xml | 0
 diagrams/data_structures/src/registry.xml | 0
 diagrams/data_structures/src/rrd.xml | 0
 diagrams/data_structures/src/web.xml | 0
 diagrams/data_structures/web.svg | 0
 doc/Add-more-charts-to-netdata.md | 429 -
 doc/Demo-Sites.md | 19 -
 doc/Donations-netdata-has-received.md | 23 -
 doc/Netdata-Security-and-Disclosure-Information.md | 37 -
 doc/Performance.md | 73 -
 doc/Running-behind-apache.md | 268 -
 doc/Running-behind-caddy.md | 27 -
 doc/Running-behind-lighttpd.md | 60 -
 doc/Running-behind-nginx.md | 202 -
 doc/Third-Party-Plugins.md | 29 -
 doc/Why-Netdata.md | 170 -
 doc/a-github-star-is-important.md | 13 -
 doc/high-performance-netdata.md | 149 -
 doc/netdata-for-IoT.md | 199 -
 doc/netdata-security.md | 179 -
 docker/Dockerfile | 104 -
 docker/README.md | 117 -
 docker/build.sh | 74 -
 docker/run.sh | 11 -
 docs/Add-more-charts-to-netdata.md | 438 +
 docs/Charts.md | 27 +
 docs/Demo-Sites.md | 21 +
 docs/Donations-netdata-has-received.md | 25 +
 docs/GettingStarted.md | 182 +
 .../Netdata-Security-and-Disclosure-Information.md | 39 +
 docs/Performance.md | 224 +
 docs/Running-behind-apache.md | 270 +
 docs/Running-behind-caddy.md | 29 +
 docs/Running-behind-lighttpd.md | 62 +
 docs/Running-behind-nginx.md | 204 +
 docs/Third-Party-Plugins.md | 31 +
 docs/a-github-star-is-important.md | 15 +
 docs/anonymous-statistics.md | 62 +
 docs/configuration-guide.md | 122 +
 docs/generator/buildhtml.sh | 60 +
 docs/generator/buildyaml.sh | 238 +
 docs/generator/checklinks.sh | 394 +
 docs/generator/custom/css/netdata.css | 3 +
 docs/generator/custom/img/favicon.ico | Bin 0 -> 1150 bytes
 .../generator/custom/javascripts/cookie-consent.js | 15 +
 .../custom/themes/material/partials/footer.html | 54 +
 docs/generator/requirements.txt | 2 +
 docs/generator/runtime.txt | 1 +
 docs/high-performance-netdata.md | 151 +
 docs/netdata-for-IoT.md | 41 +
 docs/netdata-security.md | 183 +
 docs/privacy-policy.md | 115 +
 docs/terms-of-use.md | 161 +
 docs/why-netdata/1s-granularity.md | 53 +
 docs/why-netdata/README.md | 30 +
 docs/why-netdata/immediate-results.md | 41 +
 docs/why-netdata/meaningful-presentation.md | 63 +
 docs/why-netdata/unlimited-metrics.md | 44 +
 health/.keep | 0
 health/Makefile.am | 3 +-
 health/Makefile.in | 800 --
 health/README.md | 63 +-
 health/health.c | 784 +-
 health/health.d/linux_power_supply.conf | 2 +-
 health/health.d/mdstat.conf | 10 +
 health/health.d/web_log.conf | 30 +
 health/health.h | 70 +-
 health/health_config.c | 5 +-
 health/health_json.c | 6 +
 health/health_log.c | 1 -
 health/notifications/Makefile.in | 754 --
 health/notifications/README.md | 6 +
 health/notifications/alarm-notify.sh | 2407 -----
 health/notifications/alarm-notify.sh.in | 785 +-
 health/notifications/alerta/README.md | 6 +-
 health/notifications/awssns/README.md | 6 +-
 health/notifications/discord/README.md | 4 +-
 health/notifications/email/README.md | 6 +-
 health/notifications/flock/README.md | 6 +-
 health/notifications/health_alarm_notify.conf | 79 +-
 health/notifications/irc/README.md | 6 +-
 health/notifications/kavenegar/README.md | 6 +-
 health/notifications/messagebird/README.md | 5 +-
 health/notifications/pagerduty/README.md | 3 +
 health/notifications/prowl/Makefile.inc | 12 +
 health/notifications/prowl/README.md | 22 +
 health/notifications/pushbullet/README.md | 6 +-
 health/notifications/pushover/README.md | 7 +-
 health/notifications/rocketchat/README.md | 4 +-
 health/notifications/slack/README.md | 23 +-
 health/notifications/syslog/README.md | 4 +-
 health/notifications/telegram/README.md | 4 +-
 health/notifications/twilio/README.md | 4 +-
 health/notifications/web/README.md | 4 +-
 htmldoc/buildhtml.sh | 33 -
 htmldoc/buildyaml.sh | 171 -
 htmldoc/themes/material/partials/footer.html | 57 -
 install-sh | 527 --
 installer/.keep | 0
 installer/README.md | 366 -
 installer/UNINSTALL.md | 36 -
 installer/UPDATE.md | 71 -
 installer/functions.sh | 866 --
 kickstart-static64.sh | 252 -
 kickstart.sh | 377 -
 libnetdata/Makefile.in | 664 --
 libnetdata/README.md | 2 +
 libnetdata/adaptive_resortable_list/Makefile.in | 464 -
 libnetdata/adaptive_resortable_list/README.md | 2 +
 libnetdata/avl/Makefile.in | 464 -
 libnetdata/avl/README.md | 3 +-
 libnetdata/buffer/Makefile.in | 464 -
 libnetdata/buffer/README.md | 3 +-
 libnetdata/clocks/Makefile.in | 464 -
 libnetdata/clocks/README.md | 2 +
 libnetdata/config/Makefile.in | 464 -
 libnetdata/config/README.md | 2 +
 libnetdata/config/appconfig.c | 1 +
 libnetdata/config/appconfig.h | 1 +
 libnetdata/dictionary/Makefile.in | 464 -
 libnetdata/dictionary/README.md | 2 +
 libnetdata/dictionary/dictionary.c | 35 +
 libnetdata/dictionary/dictionary.h | 1 +
 libnetdata/eval/Makefile.in | 464 -
 libnetdata/eval/README.md | 2 +
 libnetdata/libnetdata.c | 21 +-
 libnetdata/libnetdata.h | 2 +-
 libnetdata/locks/Makefile.in | 464 -
 libnetdata/locks/README.md | 2 +
 libnetdata/log/Makefile.in | 464 -
 libnetdata/log/README.md | 2 +
 libnetdata/log/log.c | 9 +
 libnetdata/log/log.h | 1 +
 libnetdata/popen/Makefile.in | 464 -
 libnetdata/popen/README.md | 2 +
 libnetdata/procfile/Makefile.in | 464 -
 libnetdata/procfile/README.md | 2 +
 libnetdata/simple_pattern/Makefile.in | 464 -
 libnetdata/simple_pattern/README.md | 2 +
 libnetdata/socket/Makefile.in | 464 -
 libnetdata/socket/README.md | 2 +
 libnetdata/socket/socket.c | 67 +-
 libnetdata/socket/socket.h | 21 +
 libnetdata/statistical/Makefile.in | 464 -
 libnetdata/statistical/README.md | 2 +
 libnetdata/storage_number/Makefile.in | 464 -
 libnetdata/storage_number/README.md | 2 +
 libnetdata/threads/Makefile.in | 464 -
 libnetdata/threads/README.md | 2 +
 libnetdata/url/Makefile.in | 464 -
 libnetdata/url/README.md | 2 +
 makeself/Makefile.am | 26 -
 makeself/Makefile.in | 485 -
 makeself/build-x86_64-static.sh | 42 -
 makeself/build.sh | 61 -
 makeself/functions.sh | 62 -
 makeself/install-alpine-packages.sh | 27 -
 makeself/install-or-update.sh | 225 -
 makeself/jobs/10-prepare-destination.install.sh | 17 -
 makeself/jobs/50-bash-4.4.18.install.sh | 54 -
 makeself/jobs/50-curl-7.60.0.install.sh | 34 -
 makeself/jobs/50-fping-4.0.install.sh | 29 -
 makeself/jobs/70-netdata-git.install.sh | 26 -
 makeself/jobs/99-makeself.install.sh | 117 -
 makeself/makeself-header.sh | 554 --
 makeself/makeself-help-header.txt | 46 -
 makeself/makeself-license.txt | 46 -
 makeself/makeself.lsm | 16 -
 makeself/makeself.sh | 621 --
 makeself/post-installer.sh | 11 -
 makeself/run-all-jobs.sh | 42 -
 missing | 215 -
 netdata-installer.sh | 1289 ++-
 netdata.spec | 244 -
 netdata.spec.in | 16 +-
 netlify.toml | 12 +
 packaging/docker/Dockerfile | 113 +
 packaging/docker/README.md | 126 +
 packaging/docker/build.sh | 83 +
 packaging/docker/run.sh | 16 +
 packaging/go.d.checksums | 16 +
 packaging/installer/.keep | 0
 packaging/installer/README.md | 413 +
 packaging/installer/UNINSTALL.md | 22 +
 packaging/installer/UPDATE.md | 55 +
 packaging/installer/functions.sh | 797 ++
 packaging/installer/kickstart-static64.sh | 252 +
 packaging/installer/kickstart.sh | 272 +
 packaging/installer/netdata-uninstaller.sh | 169 +
 packaging/installer/netdata-updater.sh | 115 +
 packaging/maintainers/README.md | 75 +
 packaging/makeself/README.md | 48 +
 packaging/makeself/build-x86_64-static.sh | 42 +
 packaging/makeself/build.sh | 61 +
 packaging/makeself/functions.sh | 62 +
 packaging/makeself/install-alpine-packages.sh | 27 +
 packaging/makeself/install-or-update.sh | 225 +
 .../jobs/10-prepare-destination.install.sh | 16 +
 packaging/makeself/jobs/50-bash-4.4.18.install.sh | 54 +
 packaging/makeself/jobs/50-curl-7.60.0.install.sh | 34 +
 packaging/makeself/jobs/50-fping-4.0.install.sh | 29 +
 packaging/makeself/jobs/70-netdata-git.install.sh | 26 +
 packaging/makeself/jobs/99-makeself.install.sh | 99 +
 packaging/makeself/makeself-header.sh | 554 ++
 packaging/makeself/makeself-help-header.txt | 44 +
 packaging/makeself/makeself-license.txt | 44 +
 packaging/makeself/makeself.lsm | 16 +
 packaging/makeself/makeself.sh | 621 ++
 packaging/makeself/post-installer.sh | 11 +
 packaging/makeself/run-all-jobs.sh | 42 +
 packaging/version | 1 +
 registry/Makefile.in | 464 -
 registry/README.md | 16 +-
 registry/registry.c | 14 +-
 registry/registry.h | 1 +
 registry/registry_init.c | 3 +
 registry/registry_internals.h | 1 +
 registry/registry_person.c | 2 +-
 registry/registry_url.c | 2 +-
 requirements.txt | 3 -
 runtime.txt | 1 -
 streaming/Makefile.in | 521 --
 streaming/README.md | 23 +-
 streaming/rrdpush.c | 2 +-
 system/Makefile.in | 588 --
 system/edit-config | 101 -
 system/netdata-freebsd.in | 2 +-
 tests/Makefile.am | 14 +
 tests/Makefile.in | 478 -
 tests/README.md | 3 +
 tests/health_mgmtapi/README.md | 13 +
 tests/health_mgmtapi/health-cmdapi-test.sh.in | 263 +
 tests/health_mgmtapi/python-example.conf | 16 +
 tests/lifecycle.bats | 27 +
 tests/profile/Makefile | 53 +
 tests/profile/benchmark-dictionary.c | 130 +
 tests/profile/benchmark-line-parsing.c | 707 ++
 tests/profile/benchmark-procfile-parser.c | 329 +
 tests/profile/benchmark-registry.c | 227 +
 tests/profile/benchmark-value-pairs.c | 623 ++
 tests/profile/statsd-stress.c | 151 +
 tests/profile/test-eval.c | 299 +
 web/Makefile.in | 652 --
 web/README.md | 16 +-
 web/api/Makefile.am | 1 +
 web/api/Makefile.in | 709 --
 web/api/README.md | 2 +
 web/api/badges/Makefile.in | 464 -
 web/api/badges/README.md | 20 +-
 web/api/badges/web_buffer_svg.c | 2 +-
 web/api/exporters/Makefile.in | 649 --
 web/api/exporters/README.md | 2 +
 web/api/exporters/allmetrics.c | 2 +-
 web/api/exporters/prometheus/Makefile.in | 464 -
 web/api/exporters/prometheus/README.md | 2 +
 web/api/exporters/shell/Makefile.in | 464 -
 web/api/exporters/shell/README.md | 2 +
 web/api/formatters/Makefile.in | 651 --
 web/api/formatters/README.md | 2 +
 web/api/formatters/csv/Makefile.in | 464 -
 web/api/formatters/csv/README.md | 2 +
 web/api/formatters/json/Makefile.in | 464 -
 web/api/formatters/json/README.md | 2 +
 web/api/formatters/ssv/Makefile.in | 464 -
 web/api/formatters/ssv/README.md | 2 +
 web/api/formatters/value/Makefile.in | 464 -
 web/api/formatters/value/README.md | 2 +
 web/api/health/Makefile.am | 8 +
 web/api/health/README.md | 163 +
 web/api/health/health_cmdapi.c | 166 +
 web/api/health/health_cmdapi.h | 31 +
 web/api/netdata-swagger.json | 455 +-
 web/api/netdata-swagger.yaml | 326 +-
 web/api/queries/Makefile.in | 656 --
 web/api/queries/README.md | 48 +-
 web/api/queries/average/Makefile.in | 464 -
 web/api/queries/average/README.md | 2 +
 web/api/queries/des/Makefile.in | 464 -
 web/api/queries/des/README.md | 2 +
 web/api/queries/incremental_sum/Makefile.in | 464 -
 web/api/queries/incremental_sum/README.md | 2 +
 web/api/queries/max/Makefile.in | 464 -
 web/api/queries/max/README.md | 2 +
 web/api/queries/median/Makefile.in | 464 -
 web/api/queries/median/README.md | 2 +
 web/api/queries/min/Makefile.in | 464 -
 web/api/queries/min/README.md | 2 +
 web/api/queries/query.c | 51 +-
 web/api/queries/ses/Makefile.in | 464 -
 web/api/queries/ses/README.md | 2 +
 web/api/queries/stddev/Makefile.in | 464 -
 web/api/queries/stddev/README.md | 2 +
 web/api/queries/sum/Makefile.in | 464 -
 web/api/queries/sum/README.md | 2 +
 web/api/web_api_v1.c | 140 +-
 web/api/web_api_v1.h | 5 +
 web/gui/.well-known/dnt/cookies | 14 +
 web/gui/Makefile.am | 4 +-
 web/gui/Makefile.in | 838 --
 web/gui/README.md | 4 +-
 web/gui/browserconfig.xml | 2 +
 web/gui/confluence/README.md | 4 +-
 web/gui/custom/README.md | 6 +-
 web/gui/dashboard.html | 2 +-
 web/gui/dashboard.js | 116 +-
 web/gui/dashboard_info.js | 28 +-
 web/gui/dashboard_info_custom_example.js | 3 +
 web/gui/demo.html | 51 +
 web/gui/demo2.html | 143 +
 web/gui/demosites.html | 1501 +++
 web/gui/demosites2.html | 1112 +++
 web/gui/fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes
 web/gui/fonts/glyphicons-halflings-regular.svg | 289 +
 web/gui/fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes
 web/gui/fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes
 web/gui/fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes
 web/gui/goto-host-from-alarm.html | 9 +-
 web/gui/images/netdata-logomark.svg | 3 +
 web/gui/images/seo-performance-128.png | Bin 0 -> 1828 bytes
 web/gui/index.html | 144 +-
 web/gui/lib/c3-0.4.18.min.js | 2 -
 web/gui/lib/morris-0.5.1.min.js | 8 -
 web/gui/lib/raphael-2.2.4-min.js | 4 -
 web/gui/main.css | 64 +-
 web/gui/main.js | 955 +-
 web/gui/manifest.json | 41 +
 web/gui/src/dashboard.js/charting/_c3.js | 114 +
 web/gui/src/dashboard.js/charting/_morris.js | 81 +
 web/gui/src/dashboard.js/charting/_raphael.js | 48 +
 web/gui/src/dashboard.js/charting/dygraph.js | 4 +
 web/gui/src/dashboard.js/common.js | 4 +-
 web/gui/src/dashboard.js/main.js | 3 +
 web/gui/src/dashboard.js/options.js | 4 +-
 web/gui/src/dashboard.js/prologue.js.inc | 3 +-
 web/gui/src/dashboard.js/registry.js | 47 +-
 web/gui/src/dashboard.js/themes.js | 2 +
 web/gui/src/dashboard.js/units-conversion.js | 45 +-
 web/gui/src/dashboard.js/utils.js | 2 +-
 web/gui/src/dashboard.js/xss.js | 2 +-
 web/gui/version.txt | 1 -
 web/server/Makefile.am | 2 -
 web/server/Makefile.in | 650 --
 web/server/README.md | 78 +-
 web/server/multi/Makefile.am | 11 -
 web/server/multi/Makefile.in | 647 --
 web/server/multi/README.md | 8 -
 web/server/multi/multi-threaded.c | 314 -
 web/server/multi/multi-threaded.h | 10 -
 web/server/single/Makefile.am | 11 -
 web/server/single/Makefile.in | 647 --
 web/server/single/README.md | 6 -
 web/server/single/single-threaded.c | 194 -
 web/server/single/single-threaded.h | 10 -
 web/server/static/Makefile.in | 647 --
 web/server/static/README.md | 3 +-
 web/server/static/static-threaded.c | 17 +-
 web/server/web_client.c | 42 +-
 web/server/web_client.h | 21 +-
 web/server/web_server.c | 106 +-
 web/server/web_server.h | 7 +-
 789 files changed, 29059 insertions(+), 80441 deletions(-)
 create mode 100644 .github/ISSUE_TEMPLATE.md
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
 create mode 100644 .github/ISSUE_TEMPLATE/question.md
 create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
 create mode 100644 .github/stale.yml
 create mode 100644 .travis.yml
 delete mode 100755 .travis/containerized_build.sh
 create mode 100644 .travis/gcs-credentials.json.enc
 delete mode 100644 .travis/images/Dockerfile.alpine
 delete mode 100644 .travis/images/Dockerfile.centos6
 delete mode 100644 .travis/images/Dockerfile.centos7
 delete mode 100644 .travis/images/Dockerfile.ubuntu1804
 create mode 100755 .travis/nightlies.sh
 create mode 100644 CHANGELOG.md
 create mode 100644 CMakeLists.txt
 create mode 100644 HISTORICAL_CHANGELOG.md
 delete mode 100644 Makefile.in
 delete mode 100644 aclocal.m4
 delete mode 100644 backends/Makefile.in
 delete mode 100644 backends/graphite/Makefile.in
 delete mode 100644 backends/json/Makefile.in
 delete mode 100644 backends/opentsdb/Makefile.in
 delete mode 100644 backends/prometheus/Makefile.in
 delete mode 100644 build/Dockerfile
 delete mode 100644 collectors/Makefile.in
 delete mode 100644 collectors/apps.plugin/Makefile.in
 delete mode 100644 collectors/cgroups.plugin/Makefile.in
 delete mode 100644 collectors/cgroups.plugin/cgroup-name.sh
 create mode 100644 collectors/charts.d.plugin/.keep
 delete mode 100644 collectors/charts.d.plugin/Makefile.in
 delete mode 100644 collectors/charts.d.plugin/charts.d.plugin
 delete mode 100644 collectors/checks.plugin/Makefile.in
 create mode 100644 collectors/cups.plugin/Makefile.am
 create mode 100644 collectors/cups.plugin/README.md
 create mode 100644 collectors/cups.plugin/cups_plugin.c
 delete mode 100644 collectors/diskspace.plugin/Makefile.in
 delete mode 100644 collectors/fping.plugin/Makefile.in
 delete mode 100644 collectors/fping.plugin/fping.plugin
 delete mode 100644 collectors/freebsd.plugin/Makefile.in
 delete mode 100644 collectors/freeipmi.plugin/Makefile.in
 delete mode 100644 collectors/idlejitter.plugin/Makefile.in
 delete mode 100644 collectors/macos.plugin/Makefile.in
 delete mode 100644 collectors/nfacct.plugin/Makefile.in
 create mode 100644 collectors/node.d.plugin/.keep
 delete mode 100644 collectors/node.d.plugin/Makefile.in
 delete mode 100644 collectors/node.d.plugin/node.d.plugin
 delete mode 100644 collectors/plugins.d/Makefile.in
 delete mode 100644 collectors/proc.plugin/Makefile.in
 mode change 100755 => 100644 collectors/proc.plugin/README.md
 create mode 100644 collectors/proc.plugin/proc_mdstat.c
 mode change 100755 => 100644 collectors/proc.plugin/proc_stat.c
 create mode 100644 collectors/proc.plugin/sys_class_power_supply.c
 create mode 100644 collectors/python.d.plugin/.keep
 delete mode 100644 collectors/python.d.plugin/Makefile.in
 delete mode 100644 collectors/python.d.plugin/python.d.plugin
 mode change 100755 => 100644 collectors/python.d.plugin/python.d.plugin.in
 create mode 100644 collectors/statsd.plugin/.keep
 delete mode 100644 collectors/statsd.plugin/Makefile.in
 delete mode 100644 collectors/tc.plugin/Makefile.in
 delete mode 100644 collectors/tc.plugin/tc-qos-helper.sh
 delete mode 100755 compile
 delete mode 100755 config.guess
 delete mode 100644 config.h.in
 delete mode 100755 config.sub
 delete mode 100755 configure
 delete mode 100644 contrib/Makefile.am
 delete mode 100644 contrib/Makefile.in
 delete mode 100644 contrib/debian/changelog
 create mode 100644 contrib/sles11/README.md
 create mode 100755 contrib/sles11/alarm-notify-basic.bash3.sh
 create mode 100644 contrib/sles11/netdata-alarms-bash3.patch
 create mode 100644 contrib/sles11/netdata-automake-no-dist-xz.patch
 create mode 100644 contrib/sles11/netdata-python-plugin-sles11.patch
 create mode 100755 contrib/sles11/netdata.init
 delete mode 100644 daemon/Makefile.in
 create mode 100755 daemon/anonymous-statistics.sh.in
 mode change 100755 => 100644 daemon/config/README.md
 delete mode 100644 database/Makefile.in
 delete mode 100755 depcomp
 delete mode 100644 diagrams/Makefile.in
 mode change 100755 => 100644 diagrams/data_structures/README.md
 mode change 100755 => 100644 diagrams/data_structures/netdata_config.svg
 mode change 100755 => 100644 diagrams/data_structures/registry.svg
 mode change 100755 => 100644 diagrams/data_structures/rrd.svg
 mode change 100755 => 100644 diagrams/data_structures/src/netdata_config.xml
 mode change 100755 => 100644 diagrams/data_structures/src/registry.xml
 mode change 100755 => 100644 diagrams/data_structures/src/rrd.xml
 mode change 100755 => 100644 diagrams/data_structures/src/web.xml
 mode change 100755 => 100644 diagrams/data_structures/web.svg
 delete mode 100644 doc/Add-more-charts-to-netdata.md
 delete mode 100644 doc/Demo-Sites.md
 delete mode 100644 doc/Donations-netdata-has-received.md
 delete mode 100644 doc/Netdata-Security-and-Disclosure-Information.md
 delete mode 100644 doc/Performance.md
 delete mode 100644 doc/Running-behind-apache.md
 delete mode 100644 doc/Running-behind-caddy.md
 delete mode 100644 doc/Running-behind-lighttpd.md
 delete mode 100644 doc/Running-behind-nginx.md
 delete mode 100644 doc/Third-Party-Plugins.md
 delete mode 100644 doc/Why-Netdata.md
 delete mode 100644 doc/a-github-star-is-important.md
 delete mode 100644 doc/high-performance-netdata.md
 delete mode 100644 doc/netdata-for-IoT.md
 delete mode 100644 doc/netdata-security.md
 delete mode 100644 docker/Dockerfile
 delete mode 100644 docker/README.md
 delete mode 100755 docker/build.sh
 delete mode 100644 docker/run.sh
 create mode 100644 docs/Add-more-charts-to-netdata.md
 create mode 100644 docs/Charts.md
 create mode 100644 docs/Demo-Sites.md
 create mode 100644 docs/Donations-netdata-has-received.md
 create mode 100644 docs/GettingStarted.md
 create mode 100644 docs/Netdata-Security-and-Disclosure-Information.md
 create mode 100644 docs/Performance.md
 create mode 100644 docs/Running-behind-apache.md
 create mode 100644 docs/Running-behind-caddy.md
 create mode 100644 docs/Running-behind-lighttpd.md
 create mode 100644 docs/Running-behind-nginx.md
 create mode 100644 docs/Third-Party-Plugins.md
 create mode 100644 docs/a-github-star-is-important.md
 create mode 100644 docs/anonymous-statistics.md
 create mode 100644 docs/configuration-guide.md
 create mode 100755 docs/generator/buildhtml.sh
 create mode 100755 docs/generator/buildyaml.sh
 create mode 100755 docs/generator/checklinks.sh
 create mode 100644 docs/generator/custom/css/netdata.css
 create mode 100644 docs/generator/custom/img/favicon.ico
 create mode 100644 docs/generator/custom/javascripts/cookie-consent.js
 create mode 100644 docs/generator/custom/themes/material/partials/footer.html
 create mode 100644 docs/generator/requirements.txt
 create mode 100644 docs/generator/runtime.txt
 create mode 100644 docs/high-performance-netdata.md
 create mode 100644 docs/netdata-for-IoT.md
 create mode 100644 docs/netdata-security.md
 create mode 100644 docs/privacy-policy.md
 create mode 100644 docs/terms-of-use.md
 create mode 100644 docs/why-netdata/1s-granularity.md
 create mode 100644 docs/why-netdata/README.md
 create mode 100644 docs/why-netdata/immediate-results.md
 create mode 100644 docs/why-netdata/meaningful-presentation.md
 create mode 100644 docs/why-netdata/unlimited-metrics.md
 create mode 100644 health/.keep
 delete mode 100644 health/Makefile.in
 delete mode 100644 health/notifications/Makefile.in
 delete mode 100644 health/notifications/alarm-notify.sh
 create mode 100644 health/notifications/prowl/Makefile.inc
 create mode 100644 health/notifications/prowl/README.md
 delete mode 100755 htmldoc/buildhtml.sh
 delete mode 100755 htmldoc/buildyaml.sh
 delete mode 100644 htmldoc/themes/material/partials/footer.html
 delete mode 100755 install-sh
 delete mode 100644 installer/.keep
 delete mode 100644 installer/README.md
 delete mode 100644 installer/UNINSTALL.md
 delete mode 100644 installer/UPDATE.md
 delete mode 100644 installer/functions.sh
 delete mode 100755 kickstart-static64.sh
 delete mode 100755 kickstart.sh
 delete mode 100644 libnetdata/Makefile.in
 delete mode 100644 libnetdata/adaptive_resortable_list/Makefile.in
 delete mode 100644 libnetdata/avl/Makefile.in
 delete mode 100644 libnetdata/buffer/Makefile.in
 delete mode 100644 libnetdata/clocks/Makefile.in
 delete mode 100644 libnetdata/config/Makefile.in
 delete mode 100644 libnetdata/dictionary/Makefile.in
 delete mode 100644 libnetdata/eval/Makefile.in
 delete mode 100644 libnetdata/locks/Makefile.in
 delete mode 100644 libnetdata/log/Makefile.in
 delete mode 100644 libnetdata/popen/Makefile.in
 delete mode 100644 libnetdata/procfile/Makefile.in
 delete mode 100644 libnetdata/simple_pattern/Makefile.in
 delete mode 100644 libnetdata/socket/Makefile.in
 delete mode 100644 libnetdata/statistical/Makefile.in
 delete mode 100644 libnetdata/storage_number/Makefile.in
 delete mode 100644 libnetdata/threads/Makefile.in
 delete mode 100644 libnetdata/url/Makefile.in
 delete mode 100644 makeself/Makefile.am
 delete mode 100644 makeself/Makefile.in
 delete mode 100755 makeself/build-x86_64-static.sh
 delete mode 100755 makeself/build.sh
 delete mode 100755 makeself/functions.sh
 delete mode 100755 makeself/install-alpine-packages.sh
 delete mode 100755 makeself/install-or-update.sh
 delete mode 100755 makeself/jobs/10-prepare-destination.install.sh
 delete mode 100755 makeself/jobs/50-bash-4.4.18.install.sh
 delete mode 100755 makeself/jobs/50-curl-7.60.0.install.sh
 delete mode 100755 makeself/jobs/50-fping-4.0.install.sh
 delete mode 100755 makeself/jobs/70-netdata-git.install.sh
 delete mode 100755 makeself/jobs/99-makeself.install.sh
 delete mode 100755 makeself/makeself-header.sh
 delete mode 100644 makeself/makeself-help-header.txt
 delete mode 100644 makeself/makeself-license.txt
 delete mode 100644 makeself/makeself.lsm
 delete mode 100755 makeself/makeself.sh
 delete mode 100755 makeself/post-installer.sh
 delete mode 100755 makeself/run-all-jobs.sh
 delete mode 100755 missing
 delete mode 100644 netdata.spec
 create mode 100644 netlify.toml
 create mode 100644 packaging/docker/Dockerfile
 create mode 100644 packaging/docker/README.md
 create mode 100755 packaging/docker/build.sh
 create mode 100644 packaging/docker/run.sh
 create mode 100644 packaging/go.d.checksums
 create mode 100644 packaging/installer/.keep
 create mode 100644 packaging/installer/README.md
 create mode 100644 packaging/installer/UNINSTALL.md
 create mode 100644 packaging/installer/UPDATE.md
 create mode 100644 packaging/installer/functions.sh
 create mode 100755 packaging/installer/kickstart-static64.sh
 create mode 100755 packaging/installer/kickstart.sh
 create mode 100755 packaging/installer/netdata-uninstaller.sh
 create mode 100644 packaging/installer/netdata-updater.sh
 create mode 100644 packaging/maintainers/README.md
 create mode 100644 packaging/makeself/README.md
 create mode 100755 packaging/makeself/build-x86_64-static.sh
 create mode 100755 packaging/makeself/build.sh
 create mode 100755 packaging/makeself/functions.sh
 create mode 100755 packaging/makeself/install-alpine-packages.sh
 create mode 100755 packaging/makeself/install-or-update.sh
 create mode 100755 packaging/makeself/jobs/10-prepare-destination.install.sh
 create mode 100755 packaging/makeself/jobs/50-bash-4.4.18.install.sh
 create mode 100755 packaging/makeself/jobs/50-curl-7.60.0.install.sh
 create mode 100755 packaging/makeself/jobs/50-fping-4.0.install.sh
 create mode 100755 packaging/makeself/jobs/70-netdata-git.install.sh
 create mode 100755 packaging/makeself/jobs/99-makeself.install.sh
 create mode 100755 packaging/makeself/makeself-header.sh
 create mode 100644 packaging/makeself/makeself-help-header.txt
 create mode 100644 packaging/makeself/makeself-license.txt
 create mode 100644 packaging/makeself/makeself.lsm
 create mode 100755 packaging/makeself/makeself.sh
 create mode 100755 packaging/makeself/post-installer.sh
 create mode 100755 packaging/makeself/run-all-jobs.sh
 create mode 100644 packaging/version
 delete mode 100644 registry/Makefile.in
 delete mode 100644 requirements.txt
 delete mode 100644 runtime.txt
 delete mode 100644 streaming/Makefile.in
 delete mode 100644 system/Makefile.in
 delete mode 100644 system/edit-config
 delete mode 100644 tests/Makefile.in
 create mode 100644 tests/health_mgmtapi/README.md
 create mode 100755 tests/health_mgmtapi/health-cmdapi-test.sh.in
 create mode 100644 tests/health_mgmtapi/python-example.conf
 create mode 100755 tests/lifecycle.bats
 create mode 100644 tests/profile/Makefile
 create mode 100644 tests/profile/benchmark-dictionary.c
 create mode 100644 tests/profile/benchmark-line-parsing.c
 create mode 100644 tests/profile/benchmark-procfile-parser.c
 create mode 100644 tests/profile/benchmark-registry.c
 create mode 100644 tests/profile/benchmark-value-pairs.c
 create mode 100644 tests/profile/statsd-stress.c
 create mode 100644 tests/profile/test-eval.c
 delete mode 100644 web/Makefile.in
 delete mode 100644 web/api/Makefile.in
 delete mode 100644 web/api/badges/Makefile.in
 delete mode 100644 web/api/exporters/Makefile.in
 delete mode 100644 web/api/exporters/prometheus/Makefile.in
 delete mode 100644 web/api/exporters/shell/Makefile.in
 delete mode 100644 web/api/formatters/Makefile.in
 delete mode 100644 web/api/formatters/csv/Makefile.in
 delete mode 100644 web/api/formatters/json/Makefile.in
 delete mode 100644 web/api/formatters/ssv/Makefile.in
 delete mode 100644 web/api/formatters/value/Makefile.in
 create mode 100644 web/api/health/Makefile.am
 create mode 100644 web/api/health/README.md
 create mode 100644 web/api/health/health_cmdapi.c
 create mode 100644 web/api/health/health_cmdapi.h
 delete mode 100644 web/api/queries/Makefile.in
 delete mode 100644 web/api/queries/average/Makefile.in
 delete mode 100644 web/api/queries/des/Makefile.in
 delete mode 100644 web/api/queries/incremental_sum/Makefile.in
 delete mode 100644 web/api/queries/max/Makefile.in
 delete mode 100644 web/api/queries/median/Makefile.in
 delete mode 100644 web/api/queries/min/Makefile.in
 delete mode 100644 web/api/queries/ses/Makefile.in
 delete mode 100644 web/api/queries/stddev/Makefile.in
 delete mode 100644 web/api/queries/sum/Makefile.in
 create mode 100644 web/gui/.well-known/dnt/cookies
 delete mode 100644 web/gui/Makefile.in
 create mode 100644 web/gui/browserconfig.xml
 create mode 100644 web/gui/demo.html
 create mode 100644 web/gui/demo2.html
 create mode 100644 web/gui/demosites.html
 create mode 100644 web/gui/demosites2.html
 create mode 100644 web/gui/fonts/glyphicons-halflings-regular.eot
 create mode 100644 web/gui/fonts/glyphicons-halflings-regular.svg
 create mode 100644 web/gui/fonts/glyphicons-halflings-regular.ttf
 create mode 100644 web/gui/fonts/glyphicons-halflings-regular.woff
 create mode 100644 web/gui/fonts/glyphicons-halflings-regular.woff2
 create mode 100644 web/gui/images/netdata-logomark.svg
 create mode 100644 web/gui/images/seo-performance-128.png
 delete mode 100644 web/gui/lib/c3-0.4.18.min.js
 delete mode 100644 web/gui/lib/morris-0.5.1.min.js
 delete mode 100644 web/gui/lib/raphael-2.2.4-min.js
 create mode 100644 web/gui/manifest.json
 create mode 100644 web/gui/src/dashboard.js/charting/_c3.js
 create mode 100644 web/gui/src/dashboard.js/charting/_morris.js
 create mode 100644 web/gui/src/dashboard.js/charting/_raphael.js
 delete mode 100644 web/gui/version.txt
 delete mode 100644 web/server/Makefile.in
 delete mode 100644 web/server/multi/Makefile.am
 delete mode 100644 web/server/multi/Makefile.in
 delete mode 100644 web/server/multi/README.md
 delete mode 100644 web/server/multi/multi-threaded.c
 delete mode 100644 web/server/multi/multi-threaded.h
 delete mode 100644 web/server/single/Makefile.am
 delete mode 100644 web/server/single/Makefile.in
 delete mode 100644 web/server/single/README.md
 delete mode 100644 web/server/single/single-threaded.c
 delete mode 100644 web/server/single/single-threaded.h
 delete mode 100644 web/server/static/Makefile.in

diff --git a/.codacy.yml b/.codacy.yml
index ea322de83..0e552d718 100644
--- a/.codacy.yml
+++ b/.codacy.yml
@@ -5,6 +5,8 @@ exclude_paths:
   - collectors/python.d.plugin/python_modules/urllib3/**
   - collectors/python.d.plugin/python_modules/third_party/**
   - collectors/node.d.plugin/node_modules/**
+  - contrib/**
+  - packaging/makeself/**
   - web/gui/css/**
   - web/gui/lib/**
   - web/gui/old/**
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 586030e6d..72c793e0c 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,8 +1,12 @@
+# Files which shouldn't be changed manually are owned by @netdatabot.
+# This way we prevent modifications which will be overwriten by automation.
+
 # Global (default) code owner
 * @ktsaou
 
 # Ownership by directory structure
-.travis/ @paufantom
+.travis/ @paufantom @cakrit
+.github/ @paufantom @cakrit
 build/ @paulfantom
 backends/ @ktsaou @vlvkobal
 backends/graphite/ @ktsaou @vlvkobal
@@ -17,26 +21,28 @@ collectors/node.d.plugin/ @ktsaou @gmosx
 collectors/node.d.plugin/fronius/ @ktsaou @gmosx @ccremer
 collectors/node.d.plugin/snmp/ @ktsaou @gmosx @cakrit
 collectors/node.d.plugin/stiebeleltron/ @ktsaou @gmosx @ccremer
-collectors/python.d.plugin/ @l2isbad
+collectors/python.d.plugin/ @ilyam8
 daemon/ @ktsaou @vlvkobal
 database/ @ktsaou @mfundul
-docker/ @paulfantom
-health/ @ktsaou @mfundul
+docs/ @cakrit
+health/ @ktsaou @cakrit
 health/health.d/ @ktsaou @cakrit
-health/notifications/ @ktsaou @Ferroin
-installer/ @ktsaou @paulfantom
+health/notifications/ @ktsaou @Ferroin @cakrit
+installer/ @ktsaou @paulfantom @cakrit
 libnetdata/ @ktsaou @vlvkobal
 makeself/ @ktsaou @paulfantom
 packaging/ @paulfantom
 registry/ @ktsaou @gmosx
 streaming/ @ktsaou @mfundul
-web/ @ktsaou @vlvkobal @gmosx
+web/ @ktsaou @vlvkobal
+web/gui/ @ktsaou @gmosx
 
 # Ownership by filetype (overwrites ownership by directory)
 *.md @ktsaou @cakrit
 *.am @paulfantom @ktsaou
 
 # Ownership of specific files
+.gitignore @paulfantom @cakrit
 .travis.yml @paulfantom
 .lgtm.yml @paulfantom
 .eslintrc @paulfantom
@@ -44,5 +50,10 @@ web/ @ktsaou @vlvkobal @gmosx
 .csslintrc @paulfantom
 .codeclimate.yml @paulfantom
 .codacy.yml @paulfantom
+netdata.spec.in @paulfantom
+netlify.toml @cakrit
+package.json @gmosx
+packaging/version @netdatabot
+LICENSE.md @ktsaou
 CHANGELOG.md @netdatabot
 
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..4fe94ad65
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,10 @@
+
+
+#### Summary
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..fbd69a2f6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,34 @@
+---
+name: Bug report
+about: Create a bug report to help us improve
+
+---
+
+
+
+##### Bug report summary
+
+##### OS / Environment
+
+##### Netdata version (ouput of `netdata -V`)
+
+##### Component Name
+
+##### Steps To Reproduce
+
+##### Expected behavior
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..b27ba2653
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,16 @@
+---
+name: Feature request
+about: Suggest an idea for our project
+
+---
+
+
+
+##### Feature idea summary
+
+##### Expected behavior
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
new file mode 100644
index 000000000..9bdf6f14e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -0,0 +1,25 @@
+---
+name: Question
+about: You just want to ask a question? Go on.
+---
+
+
+
+##### Question summary
+
+##### OS / Environment
+
+##### Component Name
+
+##### Expected results
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..b4932f9c5
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,19 @@
+
+
+##### Summary
+
+##### Component Name
+
+##### Additional Information
+
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 000000000..dfa5ce2c0
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,19 @@
+---
+only: issues
+limitPerRun: 30
+daysUntilStale: 45
+daysUntilClose: 60
+exemptLabels:
+  - bug
+  - help wanted
+  - feature request
+exemptProjects: true
+exemptMilestones: true
+staleLabel: stale
+markComment: >
+  Currently netdata team doesn't have enough capacity to work on this issue.
+  We will be more than glad to accept a pull request with a solution to problem described here.
+  This issue will be closed after another 60 days of inactivity.
+closeComment: >
+  This issue has been automatically closed due to extended period of inactivity.
+  Please reopen if it is still valid. Thank you for your contributions.
diff --git a/.gitignore b/.gitignore
index c64d75954..51b436152 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# Secrets
+gcs-credentials.json
+
 .deps
 .libs
 .dirstamp
@@ -35,6 +38,8 @@ sha256sums.txt
 # netdata binaries
 netdata
 !netdata/
+upload/
+artifacts/
 
 apps.plugin
 !apps.plugin/
@@ -42,15 +47,19 @@ apps.plugin
 freeipmi.plugin
 !freeipmi.plugin/
 
+cups.plugin
+!cups.plugin/
+
 cgroup-network
 !cgroup-network/
 
-# netdata makeself archives
+# installation artifacts
+packaging/installer/.environment.sh
 *.tar.*
 *.run
 
 # netdata makeself downloads
-makeself/tmp/
+packaging/makeself/tmp/
 
 # coverity
 cov-int/
@@ -88,6 +97,8 @@ system/netdata.plist
 system/netdata-freebsd
 system/edit-config
 
+daemon/anonymous-statistics.sh
+
 health/notifications/alarm-notify.sh
 collectors/cgroups.plugin/cgroup-name.sh
 collectors/tc.plugin/tc-qos-helper.sh
@@ -98,7 +109,6 @@ collectors/fping.plugin/fping.plugin
 
 # installer generated files
 netdata-uninstaller.sh
-netdata-updater.sh
 
 # cmake files
 cmake-build-debug/
@@ -134,6 +144,7 @@ tests/profile/benchmark-line-parsing
 tests/profile/benchmark-procfile-parser
 tests/profile/benchmark-value-pairs
 tests/profile/statsd-stress
+tests/health_mgmtapi/health-cmdapi-test.sh
 oprofile_data/
 vgcore.*
 callgrind.out.*
@@ -145,6 +156,9 @@ sitespeed-result/
 python.d/python-modules-installer.sh
 
 # documentation generated files
-htmldoc/src
-htmldoc/build
-htmldoc/mkdocs.yml
+docs/generator/src
+docs/generator/build
+docs/generator/mkdocs.yml
+
+netdata-updater.sh
+.environment.sh
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000000000..248e627a2
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,85 @@
+dist: trusty
+sudo: true
+language: c
+services:
+- docker
+
+stages:
+- test
+- build
+- name: packaging
+  if: branch = master AND type != pull_request AND type != cron
+- name: nightlies
+  if: branch = master AND type = cron
+
+jobs:
+  include:
+  - stage: test
+    name: C
+    install: sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev
+    script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it --enable-plugin-nfacct --enable-plugin-freeipmi --disable-lto && $HOME/netdata/usr/sbin/netdata -W unittest
+    env: CFLAGS='-O1 -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1'
+  - name: dashboard.js
+    script: cp web/gui/dashboard.js /tmp/dashboard.js && ./build/build.sh && diff /tmp/dashboard.js web/gui/dashboard.js
+  - name: coverity
+    install: sudo apt-get install -y zlib1g-dev uuid-dev libipmimonitoring-dev libmnl-dev libnetfilter-acct-dev
+    script: ./coverity-scan.sh || echo "Coverity failed :("
+    env: INSTALL_COVERITY="true"
+    if: type = cron
+
+  - stage: build
+# TODO(paulfantom): enable when travis OSX become stable. Probably after 12.01.2019
+#    name: OSX
+#    install: brew install fakeroot ossp-uuid
+#    script: fakeroot ./netdata-installer.sh --install $HOME --dont-wait --dont-start-it
+#    os: osx
+#  - name: ubuntu 14.04 (not containerized)
+    name: ubuntu 14.04 (not containerized)
+    install: sudo apt-get install -y libcap2-bin zlib1g-dev uuid-dev fakeroot
+    script: fakeroot ./netdata-installer.sh --dont-wait --dont-start-it --install $HOME
+  - name: build container (alpine installation)
+    script: ./packaging/docker/build.sh
+    env: DEVEL="true"
+  - name: ubuntu 18.04 + lifecycle
+    script: docker run -it -v "${PWD}:/code:rw" -w /code "netdata/os-test:ubuntu1804" bats --tap tests/lifecycle.bats
+  - name: CentOS 7
+    script: docker run -it -v "${PWD}:/code:rw" -w /code "netdata/os-test:centos7" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
+  - name: CentOS 6
+    script: docker run -it -v "${PWD}:/code:rw" -w /code "netdata/os-test:centos6" ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp
+
+  - stage: packaging
+    name: Create release (only on special commit msg)
+    install:
+    - sudo apt-get install -y gnupg libcap2-bin zlib1g-dev uuid-dev fakeroot python-pip
+    - sudo apt install -y --only-upgrade docker-ce
+    - docker info
+    before_script: sudo pip install git-semver
+    script: ".travis/releaser.sh && .travis/labeler.sh" # labeler should be replaced with GitHub Actions when they hit GA
+    git:
+      depth: false
+
+  - stage: nightlies
+    name: Nightly build
+    before_install: openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d
+    install:
+    - sudo apt-get install -y gnupg libcap2-bin zlib1g-dev uuid-dev fakeroot
+    - sudo apt install -y --only-upgrade docker-ce
+    - docker info
+    script: ".travis/nightlies.sh"
+    git:
+      depth: false
+    deploy:
+      provider: gcs
+      edge:
+        branch: gcs-ng
+      project_id: netdata-storage
+      credentials: .travis/gcs-credentials.json
+      bucket: "netdata-nightlies"
+      skip_cleanup: true
+      local_dir: "artifacts"
+
+notifications:
+  webhooks: https://app.fossa.io/hooks/travisci
+  slack:
+    rooms:
"NuW1p7s+WGLcyhEceeiLRSV1JgAc6N47zgdSsYoxrjSFRQHDfc8jensypDcEJwgs1K2Hcve9iKRaAddEHEw7AkS6rie9gFR5HmmbKXfW2GFMqOr6maYTFsvaECPqiWk1n9/XnRLsAi5kZ8HxH+a8ldb/eaVoFQesY1jPXgh11BM5DwvpXjEtwg0WGASsKiymvnXFS3KcC+sR7Lln2GX1a8vfCX2I3TEmOedKMlSHUy5JilGGC3AWA0SWS8tR8PUH0u3dHL5j0RNIr1RO3Yx24QgUpg/YpvKymnW/iIIEOq2vb2mBhhiKEQjJ1djUL4VSPzjIDpUzThVpKaHk3syOp6W9qZEHKhR/sqjc5Yk2XRjsw1cM0nS60gaCgxtKhEMKWcjtvWf04oJAVrmcUwcYXj0eA+jgRCZl5VhyufK/fUJavjOfsQGjwhdjxQfwDCw33W17ypJUt4GZngdb6jbIhEOcKHSLQDu1vuHTw82hJJkthkmR59PX30qJdl/MEGcfVLdN/fkCokjR/qwfmkNwQm+wYSKsK/Jq4RgBT0/oZwY3e8nkCq2ov7lBbDO3/0rzQKWZ9Uy//tnoCM3vGhDwGHQxsHshv7g6KwdhYTcmm7WWWIucfLupcjFUO1HbRuJ+7ZnvxRRwKiV+MGkFT2SNJkS8q1/jCu9KGbmktd0WUSE=" diff --git a/.travis/README.md b/.travis/README.md index e37e9feff..d67df293d 100644 --- a/.travis/README.md +++ b/.travis/README.md @@ -5,10 +5,9 @@ - GITHUB_TOKEN - GitHub token with push access to repository - DOCKER_USERNAME - Username (netdatabot) with write access to docker hub repository - DOCKER_PASSWORD - Password to docker hub -- encrypted_decb6f6387c4_key - Something to do with package releasing (soon to be deprecated) -- encrypted_decb6f6387c4_iv - Something to do with package releasing (soon to be deprecated) -- OLD_DOCKER_USERNAME - Username used to push images to firehol/netdata # TODO: remove after deprecating that repo -- OLD_DOCKER_PASSWORD - Password used to push images to firehol/netdata # TODO: remove after deprecating that repo +- encrypted_8daf19481253_key - key needed by openssl to decrypt GCS credentials file +- encrypted_8daf19481253_iv - IV needed by openssl to decrypt GCS credentials file +- COVERITY_SCAN_TOKEN - Token to allow coverity test analysis uploads ## Stages @@ -16,6 +15,7 @@ Unit tests and coverage tests are executed here. Stage consists of 2 parallel jobs: - C tests - executed every time + - dashboard.js - test if source files create the same file as it is in current repo - coverity test - executed only when pipeline was triggered from cron ### Build @@ -29,18 +29,23 @@ installations of netdata. Jobs are run on following operating systems: - CentOS 7 (containerized) - alpine (containerized) +Images for system containers are stored on dockerhub and are created from Dockerfiles located in +[netdata/helper-images](https://github.com/netdata/helper-images) repository. + ### Packaging This stage is executed only on "master" brach and allows us to create a new tag just looking at git commit message. It executes one script called `releaser.sh` which is responsible for creating a release on GitHub by using [hub](https://github.com/github/hub). This script is also executing other scripts which can also be used in other CI jobs: - - `tagger.sh` - - `generate_changelog.sh` - - `build.sh` - - `create_artifacts.sh` + - `.travis/tagger.sh` + - `.travis/generate_changelog.sh` + - `packaging/docker/build.sh` + - `.travis/create_artifacts.sh` Alternatively new release can be also created by pushing new tag to master branch. +Additionally this step is also executing `.travis/labeler.sh` which is a temporary workaround to automatically label +issues and PR. This script should be replaced with GitHub Actions when they are available to public. ##### tagger.sh @@ -77,12 +82,10 @@ This is achieved by running 2 scripts described earlier: - `create_artifacts.sh` - `build.sh` +Artifacts are pushed to GCS and container images are stored in docker hub. + ##### Changelog generation This job is responsible for regenerating changelog every day by executing `generate_changelog.sh` script. 
## Stages @@ -16,6 +15,7 @@ Unit tests and coverage tests are executed here. Stage consists of 3 parallel jobs: - C tests - executed every time + - dashboard.js - tests that a rebuild produces the same dashboard.js file as the one in the current repo - coverity test - executed only when the pipeline is triggered from cron ### Build @@ -29,18 +29,23 @@ installations of netdata. Jobs are run on the following operating systems: - CentOS 7 (containerized) - alpine (containerized) +Images for system containers are stored on Docker Hub and are created from Dockerfiles located in the +[netdata/helper-images](https://github.com/netdata/helper-images) repository. + ### Packaging This stage is executed only on the "master" branch and allows us to create a new tag just by looking at the git commit message. It executes one script called `releaser.sh`, which is responsible for creating a release on GitHub by using [hub](https://github.com/github/hub). This script also executes other scripts, which can be reused in other CI jobs: - - `tagger.sh` - - `generate_changelog.sh` - - `build.sh` - - `create_artifacts.sh` + - `.travis/tagger.sh` + - `.travis/generate_changelog.sh` + - `packaging/docker/build.sh` + - `.travis/create_artifacts.sh` Alternatively, a new release can also be created by pushing a new tag to the master branch. +Additionally, this step also executes `.travis/labeler.sh`, which is a temporary workaround to automatically label +issues and PRs. This script should be replaced with GitHub Actions when they are available to the public. ##### tagger.sh @@ -77,12 +82,10 @@ This is achieved by running 2 scripts described earlier: - `create_artifacts.sh` - `build.sh` +Artifacts are pushed to GCS and container images are stored on Docker Hub. + ##### Changelog generation This job is responsible for regenerating the changelog every day by executing the `generate_changelog.sh` script. This is done only once a day due to the GitHub rate limiter. -##### Labeler - -Once a day we are doing automatic label assignment by executing `labeler.sh`. This script is a temporary workaround until -we start using GitHub Actions. For more information what it is currently doing go to its code.
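As the Packaging section above describes, releases are keyed off magic strings in the commit message; the exact keywords are the `case` patterns in `.travis/tagger.sh` further down in this patch. A hypothetical trigger, assuming push access to master and that the keyword list stays as shown there:

```bash
# The keyword, not the diff, drives the tagging, so an empty commit is enough.
git checkout master && git pull
git commit --allow-empty -m "[netdata minor release]"
# or "[netdata major release]" / "[netdata release candidate]"
git push origin master
```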
diff --git a/.travis/containerized_build.sh b/.travis/containerized_build.sh deleted file mode 100755 index 314a2ec39..000000000 --- a/.travis/containerized_build.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -e - -docker build -t dev-image -f ".travis/images/Dockerfile.$1" . - -docker run -it -w /code dev-image ./netdata-installer.sh --dont-wait --dont-start-it --install /tmp diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh index 40ba9c85f..ca0724e19 100755 --- a/.travis/create_artifacts.sh +++ b/.travis/create_artifacts.sh @@ -1,33 +1,36 @@ #!/bin/bash # shellcheck disable=SC2230 -if [ ! -f .gitignore ] -then - echo "Run as ./travis/$(basename "$0") from top level directory of git repository" - exit 1 +set -e + +if [ ! -f .gitignore ]; then + echo "Run as ./travis/$(basename "$0") from top level directory of git repository" + exit 1 fi +# Everything from this directory will be uploaded to GCS +mkdir -p artifacts +BASENAME="netdata-$(git describe)" + # Make sure stdout is in blocking mode. If we don't, then conda create will barf during downloads. # See https://github.com/travis-ci/travis-ci/issues/4704#issuecomment-348435959 for details. python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' echo "--- Create tarball ---" autoreconf -ivf -./configure +./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --with-zlib --with-math --with-user=netdata CFLAGS=-O2 make dist -echo "--- Create self-extractor ---" -./makeself/build-x86_64-static.sh +mv "${BASENAME}.tar.gz" artifacts/ -echo "--- Create checksums ---" -GIT_TAG=$(git tag --points-at) -if [ "${GIT_TAG}" != "" ]; then - ln -s netdata-latest.gz.run "netdata-${GIT_TAG}.gz.run" - ln -s netdata-*.tar.gz "netdata-${GIT_TAG}.tar.gz" - sha256sum -b "netdata-${GIT_TAG}.gz.run" "netdata-${GIT_TAG}.tar.gz" > "sha256sums.txt" -else - sha256sum -b ./*.tar.gz ./*.gz.run > "sha256sums.txt" -fi +echo "--- Create self-extractor ---" +./packaging/makeself/build-x86_64-static.sh +# Needed for GCS +echo "--- Copy artifacts to separate directory ---" +#shellcheck disable=SC2164 +cd artifacts +ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz +ln -s "${BASENAME}.gz.run" netdata-latest.gz.run +sha256sum -b ./* >"sha256sums.txt" echo "checksums:" cat sha256sums.txt - diff --git a/.travis/gcs-credentials.json.enc b/.travis/gcs-credentials.json.enc new file mode 100644 index 000000000..5d1e7b2dd Binary files /dev/null and b/.travis/gcs-credentials.json.enc differ
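`create_artifacts.sh` above leaves the tarball, the self-extracting installer, and `sha256sums.txt` side by side in `artifacts/`, so a consumer can verify a download with stock coreutils. A minimal sketch, assuming all three files were fetched from the nightlies bucket into the current directory:

```bash
# Verify downloaded artifacts against the published checksum list.
# sha256sum prints one OK line per file and exits non-zero (with FAILED)
# on any corrupt download.
sha256sum -c sha256sums.txt
```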
diff --git a/.travis/generate_changelog.sh b/.travis/generate_changelog.sh index d9b91113a..d1b72e071 100755 --- a/.travis/generate_changelog.sh +++ b/.travis/generate_changelog.sh @@ -2,10 +2,9 @@ set -e -if [ ! -f .gitignore ] -then - echo "Run as ./travis/$(basename "$0") from top level directory of git repository" - exit 1 +if [ ! -f .gitignore ]; then + echo "Run as ./travis/$(basename "$0") from top level directory of git repository" + exit 1 fi ORGANIZATION=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $1}') @@ -13,24 +12,22 @@ PROJECT=$(echo "$TRAVIS_REPO_SLUG" | awk -F '/' '{print $2}') GIT_MAIL=${GIT_MAIL:-"pawel+bot@netdata.cloud"} GIT_USER=${GIT_USER:-"netdatabot"} -echo "--- Initialize git configuration ---" -git config user.email "${GIT_MAIL}" -git config user.name "${GIT_USER}" +if [ -z ${GIT_TAG+x} ]; then + OPTS="" +else + OPTS="--future-release ${GIT_TAG}" +fi echo "--- Creating changelog ---" git checkout master git pull #docker run -it --rm -v "$(pwd)":/usr/local/src/your-app ferrarimarco/github-changelog-generator:1.14.3 \ docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \ - --user "${ORGANIZATION}" \ - --project "${PROJECT}" \ - --token "${GITHUB_TOKEN}" \ - --since-tag "v1.10.0" \ - --unreleased-label "**Next release**" \ - --no-compare-link \ - --exclude-labels duplicate,question,invalid,wontfix,discussion,documentation + --user "${ORGANIZATION}" \ + --project "${PROJECT}" \ + --token "${GITHUB_TOKEN}" \ + --since-tag "v1.10.0" \ + --unreleased-label "**Next release**" \ + --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \ + --no-compare-link ${OPTS} -echo "--- Uploading changelog ---" -git add CHANGELOG.md -git commit -m '[ci skip] Automatic changelog update' || exit 0 -git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')"
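Outside CI the same dockerized generator can be run by hand. A sketch assuming `GITHUB_TOKEN` is exported and Docker is available; the owner/repo are hard-coded here instead of being derived from `TRAVIS_REPO_SLUG` as the script does:

```bash
# Hypothetical manual run, mirroring the flags the script passes.
docker run -it -v "$(pwd)":/project markmandel/github-changelog-generator:latest \
  --user netdata \
  --project netdata \
  --token "${GITHUB_TOKEN}" \
  --since-tag "v1.10.0" \
  --unreleased-label "**Next release**" \
  --exclude-labels "stale,duplicate,question,invalid,wontfix,discussion,no changelog" \
  --no-compare-link
```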
+echo "===== Categorizing PRs =====" +NEW_LABELS=/tmp/new_labels +for PR in $(hub pr list -s all -f "%I%n" -L 10); do + echo "----- Processing PR #$PR -----" + echo "" >$NEW_LABELS + NEW_SET="" + DIFF_URL="https://github.com/netdata/netdata/pull/$PR.diff" + for FILE in $(curl -L "${DIFF_URL}" 2>/dev/null | grep "diff --git a/" | cut -d' ' -f3 | sort | uniq); do + LABEL="" + case "${FILE}" in + *".md") AREA="docs" ;; + *"/collectors/python.d.plugin/"*) AREA="external/python" ;; + *"/collectors/charts.d.plugin/"*) AREA="external" ;; + *"/collectors/node.d.plugin/"*) AREA="external" ;; + *"/.travis"*) AREA="ci" ;; + *"/.github/*.md"*) AREA="docs" ;; + *"/.github/"*) AREA="ci" ;; + *"/build/"*) AREA="packaging" ;; + *"/contrib/"*) AREA="packaging" ;; + *"/diagrams/"*) AREA="docs" ;; + *"/installer/"*) AREA="packaging" ;; + *"/makeself/"*) AREA="packaging" ;; + *"/system/"*) AREA="packaging" ;; + *"/netdata-installer.sh"*) AREA="packaging" ;; + *) AREA=$(echo "$FILE" | cut -d'/' -f2) ;; + esac + LABEL="area/$AREA" + echo "Selecting $LABEL due to $FILE" + if grep "$LABEL" "$LABELS_FILE"; then + echo "$LABEL" >>$NEW_LABELS + if [[ $LABEL =~ "external" ]]; then + echo "area/collectors" >>$NEW_LABELS + fi + else + echo "-------- Label '$LABEL' not available --------" + fi + done + NEW_SET=$(sort $NEW_LABELS | uniq) + if [ ! -z "$NEW_SET" ]; then + PREV=$(curl -H "Authorization: token $GITHUB_TOKEN" "https://api.github.com/repos/netdata/netdata/issues/$PR/labels" 2>/dev/null | jq '.[].name' | grep -v "area") + new_labels "$PR" ${NEW_SET} "${PREV[*]}" + fi done diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh new file mode 100755 index 000000000..fd133d08a --- /dev/null +++ b/.travis/nightlies.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +BAD_THING_HAPPENED=0 + +if [ ! 
diff --git a/.travis/nightlies.sh b/.travis/nightlies.sh new file mode 100755 index 000000000..fd133d08a --- /dev/null +++ b/.travis/nightlies.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +BAD_THING_HAPPENED=0 + +if [ ! -f .gitignore ]; then + echo "Run as ./travis/$(basename "$0") from top level directory of git repository" + exit 1 +fi + +export GIT_MAIL="pawel+bot@netdata.cloud" +export GIT_USER="netdatabot" +echo "--- Initialize git configuration ---" +git config user.email "${GIT_MAIL}" +git config user.name "${GIT_USER}" + +echo "--- UPDATE VERSION FILE ---" +LAST_TAG=$(git describe --abbrev=0 --tags) +NO_COMMITS=$(git rev-list "$LAST_TAG"..HEAD --count) +if [ "$NO_COMMITS" == "$(rev <packaging/version | cut -d- -f 2 | rev)" ]; then + echo "Nothing changed since last nightly build" + exit 0 +fi +echo "$LAST_TAG-$((NO_COMMITS + 1))-nightly" >packaging/version +git add packaging/version || exit 1 + +echo "--- GENERATE CHANGELOG ---" +if .travis/generate_changelog.sh; then + git add CHANGELOG.md + + echo "--- UPLOAD FILE CHANGES ---" + git commit -m '[ci skip] create nightly packages and update changelog' + git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" +else + git clean -xfd + BAD_THING_HAPPENED=1 +fi + +echo "--- BUILD & PUBLISH DOCKER IMAGES ---" +export REPOSITORY="netdata/netdata" +packaging/docker/build.sh || BAD_THING_HAPPENED=1 + +echo "--- BUILD ARTIFACTS ---" +.travis/create_artifacts.sh || BAD_THING_HAPPENED=1 + +exit "${BAD_THING_HAPPENED}"
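The version file written by `nightlies.sh` encodes the last tag plus the number of commits since it. A worked sketch of the string it produces, assuming the version-bump lines reconstructed above and a hypothetical last tag of v1.11.1 with 44 commits on top:

```bash
# Illustration only: the values are assumptions, the format mirrors the script.
LAST_TAG="v1.11.1"
NO_COMMITS=44
echo "$LAST_TAG-$((NO_COMMITS + 1))-nightly"  # -> v1.11.1-45-nightly
```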
diff --git a/.travis/releaser.sh b/.travis/releaser.sh index c184cc726..870dec52c 100755 --- a/.travis/releaser.sh +++ b/.travis/releaser.sh @@ -34,17 +34,41 @@ export GIT_USER="netdatabot" echo "--- Initialize git configuration ---" git config user.email "${GIT_MAIL}" git config user.name "${GIT_USER}" +git checkout master +git pull echo "---- FIGURING OUT TAGS ----" # tagger.sh is sourced since we need environment variables it sets #shellcheck source=/dev/null source .travis/tagger.sh || exit 0 +# variable GIT_TAG is produced by the tagger.sh script + +echo "---- UPDATE VERSION FILE ----" +echo "$GIT_TAG" >packaging/version +git add packaging/version + +echo "---- GENERATE CHANGELOG -----" +./.travis/generate_changelog.sh +git add CHANGELOG.md + +echo "---- COMMIT AND PUSH CHANGES ----" +git commit -m "[ci skip] release $GIT_TAG" +git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no. $TRAVIS_BUILD_NUMBER" +git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" +git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags +# After those operations the output of `git describe` should be identical to the value of GIT_TAG + +if [[ $(git describe) =~ -rc* ]]; then + echo "This is a release candidate tag, exiting without artifact building" + exit 0 +fi echo "---- CREATING TAGGED DOCKER CONTAINERS ----" export REPOSITORY="netdata/netdata" -./docker/build.sh +./packaging/docker/build.sh echo "---- CREATING RELEASE ARTIFACTS -----" +# Artifacts are stored in the `artifacts/` directory ./.travis/create_artifacts.sh echo "---- CREATING RELEASE DRAFT WITH ASSETS -----" @@ -63,12 +87,8 @@ if [ "${GIT_TAG}" != "$(git tag --points-at)" ]; then echo "ERROR! Current commit is not tagged. Stopping release creation." exit 1 fi -if [ -z ${RC+x} ]; then - hub release create --prerelease --draft -a "netdata-${GIT_TAG}.tar.gz" -a "netdata-${GIT_TAG}.gz.run" -a "sha256sums.txt" -m "${GIT_TAG}" "${GIT_TAG}" -else - hub release create --draft -a "netdata-${GIT_TAG}.tar.gz" -a "netdata-${GIT_TAG}.gz.run" -a "sha256sums.txt" -m "${GIT_TAG}" "${GIT_TAG}" -fi - -# Changelog needs to be created AFTER new release to avoid problems with circular dependencies and wrong entries in changelog file -echo "---- GENERATING CHANGELOG -----" -./.travis/generate_changelog.sh +hub release create --draft \ + -a "artifacts/netdata-${GIT_TAG}.tar.gz" \ + -a "artifacts/netdata-${GIT_TAG}.gz.run" \ + -a "artifacts/sha256sums.txt" \ + -m "${GIT_TAG}" "${GIT_TAG}" diff --git a/.travis/tagger.sh b/.travis/tagger.sh index b1907c347..e72c5721c 100755 --- a/.travis/tagger.sh +++ b/.travis/tagger.sh @@ -15,6 +15,8 @@ # - GITHUB_TOKEN variable set with GitHub token. Access level: repo.public_repo # - git-semver python package (pip install git-semver) +# exported variables are needed by releaser.sh + set -e if [ ! -f .gitignore ]; then @@ -22,33 +24,19 @@ if [ ! -f .gitignore ]; then exit 1 fi -# Embed new version in files which need it. -# This wouldn't be needed if we could use `git tag` everywhere. -function embed_version { - VERSION="$1" - MAJOR=$(echo "$GIT_TAG" | cut -d . -f 1 | cut -d v -f 2) - MINOR=$(echo "$GIT_TAG" | cut -d . -f 2) - PATCH=$(echo "$GIT_TAG" | cut -d . -f 3 | cut -d '-' -f 1) - sed -i "s/\\[VERSION_MAJOR\\], \\[.*\\]/\\[VERSION_MAJOR\\], \\[$MAJOR\\]/" configure.ac - sed -i "s/\\[VERSION_MINOR\\], \\[.*\\]/\\[VERSION_MINOR\\], \\[$MINOR\\]/" configure.ac - sed -i "s/\\[VERSION_PATCH\\], \\[.*\\]/\\[VERSION_PATCH\\], \\[$PATCH\\]/" configure.ac - git add configure.ac -} - # Figure out what the new release-candidate tag will be, based only on previous ones. # This assumes that RELEASES are in format of "v0.1.2" and prereleases (RCs) are using "v0.1.2-rc0" -function release_candidate { +function release_candidate() { LAST_TAG=$(git semver) if [[ $LAST_TAG =~ -rc* ]]; then - LAST_RELEASE=$(echo "$LAST_TAG" | cut -d'-' -f 1) + VERSION=$(echo "$LAST_TAG" | cut -d'-' -f 1) + LAST_RC=$(echo "$LAST_TAG" | cut -d'c' -f 2) + RC=$((LAST_RC + 1)) else - LAST_RELEASE=$LAST_TAG + VERSION="$(git semver --next-minor)" + RC=0 fi - GIT_TAG="v$LAST_RELEASE-rc$RC" - export GIT_TAG + GIT_TAG="v$VERSION-rc$RC" } # Check if current commit is tagged or not @@ -62,20 +50,10 @@ if [ -z "${GIT_TAG}" ]; then *"[netdata minor release]"*) GIT_TAG="v$(git semver --next-minor)" ;; *"[netdata major release]"*) GIT_TAG="v$(git semver --next-major)" ;; *"[netdata release candidate]"*) release_candidate ;; - *) echo "Keyword not detected. Exiting..."; exit 1;; + *) + echo "Keyword not detected. Exiting..." + exit 0 + ;; esac - # Tag it! - if [ "$GIT_TAG" != "HEAD" ]; then - echo "Assigning a new tag: $GIT_TAG" - embed_version "$GIT_TAG" - git commit -m "[ci skip] release $GIT_TAG" - git tag "$GIT_TAG" -a -m "Automatic tag generation for travis build no.
$TRAVIS_BUILD_NUMBER" - git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" - git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" --tags - fi -else - embed_version "$GIT_TAG" - git commit -m "[ci skip] release $GIT_TAG" - git push "https://${GITHUB_TOKEN}:@$(git config --get remote.origin.url | sed -e 's/^https:\/\///')" fi export GIT_TAG diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..7f48476c6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,835 @@ +# Changelog + +## [v1.12.0](https://github.com/netdata/netdata/tree/v1.12.0) (2019-02-06) + +**Fixed bugs:** + +- cups.plugin fails to be compiled [\#5324](https://github.com/netdata/netdata/issues/5324) +- Slack alert displaying URL after manual update of net-data [\#5301](https://github.com/netdata/netdata/issues/5301) +- Netdata update in a /tmp hardened system [\#5289](https://github.com/netdata/netdata/issues/5289) +- Certificate error while running netdata kickstart script [\#5273](https://github.com/netdata/netdata/issues/5273) +- Netdata won't update anymore [\#5272](https://github.com/netdata/netdata/issues/5272) +- alarm-notify.sh not working with latest update of netdata [\#5261](https://github.com/netdata/netdata/issues/5261) +- /etc/netdata/edit-config charts.d.conf [\#5252](https://github.com/netdata/netdata/issues/5252) +- Cannot install netdata from source \(the source directory does not include netdata-installer.sh\) [\#5251](https://github.com/netdata/netdata/issues/5251) +- Non-interactive install fails if required packages are already present [\#5240](https://github.com/netdata/netdata/issues/5240) +- apps.plugin memory usage bug [\#5237](https://github.com/netdata/netdata/issues/5237) +- Automatic updates \(via CRON\) giving error [\#5229](https://github.com/netdata/netdata/issues/5229) +- Updater script no longer seems to be working after a recent update [\#5228](https://github.com/netdata/netdata/issues/5228) +- Cron Update fails \(again\) [\#5208](https://github.com/netdata/netdata/issues/5208) +- It is netdata instalation hacked ? [\#5207](https://github.com/netdata/netdata/issues/5207) +- Wrong version string in GUI [\#5204](https://github.com/netdata/netdata/issues/5204) +- GUI links to github wiki [\#5202](https://github.com/netdata/netdata/issues/5202) +- Version checker shouldn't compare commits [\#5201](https://github.com/netdata/netdata/issues/5201) +- python.d/dockerd plugin update error [\#5200](https://github.com/netdata/netdata/issues/5200) +- Netdata registry with basic auth \(behind nginx proxy\) results in error 409 [\#5180](https://github.com/netdata/netdata/issues/5180) +- alarm-notify.sh: WARNING: Cannot find file [\#5136](https://github.com/netdata/netdata/issues/5136) +- Netdata w/ Docker Container not show Disk space utilization for mounts [\#5071](https://github.com/netdata/netdata/issues/5071) +- zfs charts appear, even when they are zero [\#4115](https://github.com/netdata/netdata/issues/4115) +- Ceph - No JSON object could be decoded [\#3563](https://github.com/netdata/netdata/issues/3563) + +**Closed issues:** + +- integrate go-orchestrator into go.d.plugin [\#5308](https://github.com/netdata/netdata/issues/5308) +- Update not working or UI just showing wrong information? How to uninstall? 
[\#5285](https://github.com/netdata/netdata/issues/5285) +- Slack Notifications Ignored by alarm-notify.sh [\#5267](https://github.com/netdata/netdata/issues/5267) +- varnish plugin doesn't support custom varnishd working directory [\#5262](https://github.com/netdata/netdata/issues/5262) +- Developing a new plugin questions [\#5235](https://github.com/netdata/netdata/issues/5235) +- Split go.d plugin into two packages [\#5195](https://github.com/netdata/netdata/issues/5195) +- move python module nvidia\_smi to go.d [\#5190](https://github.com/netdata/netdata/issues/5190) +- Logstash monitoring [\#5147](https://github.com/netdata/netdata/issues/5147) +- integrate go.d into netdata [\#5006](https://github.com/netdata/netdata/issues/5006) +- new database format design [\#4687](https://github.com/netdata/netdata/issues/4687) +- \[REQUEST\] Prowl integration for iOS users? [\#3788](https://github.com/netdata/netdata/issues/3788) +- CUPS information [\#857](https://github.com/netdata/netdata/issues/857) + +**Merged pull requests:** + +- Fix Codacy issues for FreeBSD plugin [\#5334](https://github.com/netdata/netdata/pull/5334) ([vlvkobal](https://github.com/vlvkobal)) +- portcheck: remove unused var [\#5332](https://github.com/netdata/netdata/pull/5332) ([ilyam8](https://github.com/ilyam8)) +- fix some python codacy errors [\#5331](https://github.com/netdata/netdata/pull/5331) ([ilyam8](https://github.com/ilyam8)) +- Remove codacy warnings from sma\_webbox [\#5330](https://github.com/netdata/netdata/pull/5330) ([cakrit](https://github.com/cakrit)) +- Allow user to override the default behavior for read-only mounts [\#5327](https://github.com/netdata/netdata/pull/5327) ([vlvkobal](https://github.com/vlvkobal)) +- Remove deprecated API call [\#5326](https://github.com/netdata/netdata/pull/5326) ([Aisbergg](https://github.com/Aisbergg)) +- fix compilation of cups.plugin; fixes \#5324 [\#5325](https://github.com/netdata/netdata/pull/5325) ([ktsaou](https://github.com/ktsaou)) +- Clarify that uninstaller.sh needs to be downloaded [\#5315](https://github.com/netdata/netdata/pull/5315) ([cakrit](https://github.com/cakrit)) +- Remove registrypath from alarm-notify [\#5302](https://github.com/netdata/netdata/pull/5302) ([cakrit](https://github.com/cakrit)) +- Minor updates to anonymous statistics [\#5295](https://github.com/netdata/netdata/pull/5295) ([cakrit](https://github.com/cakrit)) +- kickstart: noexec detection [\#5293](https://github.com/netdata/netdata/pull/5293) ([paulfantom](https://github.com/paulfantom)) +- Correct info on what kickstart.sh does [\#5292](https://github.com/netdata/netdata/pull/5292) ([cakrit](https://github.com/cakrit)) +- Add errno to fatal event for statistics [\#5291](https://github.com/netdata/netdata/pull/5291) ([cakrit](https://github.com/cakrit)) +- updated cncf landscape url [\#5288](https://github.com/netdata/netdata/pull/5288) ([ktsaou](https://github.com/ktsaou)) +- Add back the symlink netdata-latest.gz.run [\#5286](https://github.com/netdata/netdata/pull/5286) ([cakrit](https://github.com/cakrit)) +- Additional UI fixes [\#5284](https://github.com/netdata/netdata/pull/5284) ([gmosx](https://github.com/gmosx)) +- GUI Update check - use version instead of commit [\#5283](https://github.com/netdata/netdata/pull/5283) ([cakrit](https://github.com/cakrit)) +- Correct auto-updater to netdata-updater [\#5281](https://github.com/netdata/netdata/pull/5281) ([cakrit](https://github.com/cakrit)) +- netdata update instructions after recent changes 
[\#5277](https://github.com/netdata/netdata/pull/5277) ([cakrit](https://github.com/cakrit)) +- Fix incorrect parsing of ACLs [\#5275](https://github.com/netdata/netdata/pull/5275) ([cakrit](https://github.com/cakrit)) +- Improve apps grouping config and docs [\#5269](https://github.com/netdata/netdata/pull/5269) ([vlvkobal](https://github.com/vlvkobal)) +- Always run make clean before make [\#5265](https://github.com/netdata/netdata/pull/5265) ([cakrit](https://github.com/cakrit)) +- varnish module: add instance\_name option [\#5264](https://github.com/netdata/netdata/pull/5264) ([ilyam8](https://github.com/ilyam8)) +- Bug fix for 5261 [\#5263](https://github.com/netdata/netdata/pull/5263) ([cakrit](https://github.com/cakrit)) +- ceph module bugfix: fix invalid json response [\#5260](https://github.com/netdata/netdata/pull/5260) ([ilyam8](https://github.com/ilyam8)) +- Fix typo in docs/configuration-guide.md [\#5259](https://github.com/netdata/netdata/pull/5259) ([u32i64](https://github.com/u32i64)) +- SUSE addition [\#5258](https://github.com/netdata/netdata/pull/5258) ([dannysauer](https://github.com/dannysauer)) +- Check version.txt in correct directory and fix link to docs [\#5256](https://github.com/netdata/netdata/pull/5256) ([cakrit](https://github.com/cakrit)) +- Mysql charts fix [\#5250](https://github.com/netdata/netdata/pull/5250) ([ilyam8](https://github.com/ilyam8)) +- plugins.d doc: Remove empty similar headline [\#5245](https://github.com/netdata/netdata/pull/5245) ([simonnagl](https://github.com/simonnagl)) +- Pass correct options to the configure command [\#5244](https://github.com/netdata/netdata/pull/5244) ([cakrit](https://github.com/cakrit)) +- Update kickstart.sh md5sum in docs [\#5242](https://github.com/netdata/netdata/pull/5242) ([cakrit](https://github.com/cakrit)) +- Fix check for install-required-packages.sh [\#5241](https://github.com/netdata/netdata/pull/5241) ([cakrit](https://github.com/cakrit)) +- Fix nightly builds and cron autoupdater [\#5232](https://github.com/netdata/netdata/pull/5232) ([paulfantom](https://github.com/paulfantom)) +- Remove v before the version [\#5223](https://github.com/netdata/netdata/pull/5223) ([cakrit](https://github.com/cakrit)) +- Instruct users to use edit-config [\#5222](https://github.com/netdata/netdata/pull/5222) ([cakrit](https://github.com/cakrit)) +- Improvements to QoS \(tc\) documentation [\#5221](https://github.com/netdata/netdata/pull/5221) ([cakrit](https://github.com/cakrit)) +- python dockerd module: check version [\#5217](https://github.com/netdata/netdata/pull/5217) ([ilyam8](https://github.com/ilyam8)) +- Bug fix for netdata behind authenticated proxies [\#5216](https://github.com/netdata/netdata/pull/5216) ([cakrit](https://github.com/cakrit)) +- add go.d.plugin to apps\_groups.conf [\#5214](https://github.com/netdata/netdata/pull/5214) ([ilyam8](https://github.com/ilyam8)) +- Don't show zero charts for ZFS filesystem [\#5211](https://github.com/netdata/netdata/pull/5211) ([vlvkobal](https://github.com/vlvkobal)) +- install go.d.plugin [\#5199](https://github.com/netdata/netdata/pull/5199) ([paulfantom](https://github.com/paulfantom)) +- Correct link to Rest API [\#5193](https://github.com/netdata/netdata/pull/5193) ([cakrit](https://github.com/cakrit)) +- CUPS plugin [\#5188](https://github.com/netdata/netdata/pull/5188) ([simonnagl](https://github.com/simonnagl)) +- alarm-notify: Add Prowl integration for iOS users. 
[\#5132](https://github.com/netdata/netdata/pull/5132) ([Ferroin](https://github.com/Ferroin)) +- Anonymous statistics [\#5113](https://github.com/netdata/netdata/pull/5113) ([cakrit](https://github.com/cakrit)) +- Update info on plugins in performance doc [\#5101](https://github.com/netdata/netdata/pull/5101) ([cakrit](https://github.com/cakrit)) +- Cloud Sign-In [\#5095](https://github.com/netdata/netdata/pull/5095) ([gmosx](https://github.com/gmosx)) + +## [v1.12.0-rc3](https://github.com/netdata/netdata/tree/v1.12.0-rc3) (2019-01-17) + +**Fixed bugs:** + +- megacli isn't included in python.d.conf [\#5191](https://github.com/netdata/netdata/issues/5191) +- Unix Domain Socket no longer working. Permission denied [\#5181](https://github.com/netdata/netdata/issues/5181) +- netdata-updater.sh doesn't have exec perms [\#5175](https://github.com/netdata/netdata/issues/5175) +- FireQoS name not showing due to recent change [\#5171](https://github.com/netdata/netdata/issues/5171) +- python go\_expvar: reuse same expvar key in different charts [\#5133](https://github.com/netdata/netdata/issues/5133) +- hddtemp.chart.py is hardcoded to only use /dev/sdX [\#5129](https://github.com/netdata/netdata/issues/5129) +- RabbitMQ Plugin wrong metrics for nodes in cluster [\#5118](https://github.com/netdata/netdata/issues/5118) +- cannot install netdata [\#5117](https://github.com/netdata/netdata/issues/5117) +- Anomalous \(big\) values on graphite/carbon [\#5104](https://github.com/netdata/netdata/issues/5104) +- \[Bug\] Stale metrics being exported to prometheus [\#5064](https://github.com/netdata/netdata/issues/5064) +- Uninstaller script should be self-contained [\#5031](https://github.com/netdata/netdata/issues/5031) +- Netdata doesn't properly lookup docker container name when running in ECS with task level cpu/memory limits enabled [\#4981](https://github.com/netdata/netdata/issues/4981) +- Dashboard TV white page [\#4710](https://github.com/netdata/netdata/issues/4710) +- Review of system.ram plugin: treat Slab memory as Cached \(PR 3288\) [\#3929](https://github.com/netdata/netdata/issues/3929) +- Fix for unix sockets after addition of port ACLs [\#5184](https://github.com/netdata/netdata/pull/5184) ([cakrit](https://github.com/cakrit)) + +**Closed issues:** + +- Remove support for multi-threaded and single-threaded web servers [\#5154](https://github.com/netdata/netdata/issues/5154) +- Use GCS instead of git for updating netdata [\#5110](https://github.com/netdata/netdata/issues/5110) +- error.log: IPv6 not properly show in error messages [\#5067](https://github.com/netdata/netdata/issues/5067) +- Introduce Polymorphic Linux in the Docker Image [\#5034](https://github.com/netdata/netdata/issues/5034) +- Allow netdata to listen to multiple ports [\#5017](https://github.com/netdata/netdata/issues/5017) +- SNMP section not visible [\#4021](https://github.com/netdata/netdata/issues/4021) +- allow different ports for streaming reception and API requests [\#3830](https://github.com/netdata/netdata/issues/3830) +- Consul monitoring service health checks [\#3674](https://github.com/netdata/netdata/issues/3674) +- maintenance time and silence time [\#3187](https://github.com/netdata/netdata/issues/3187) +- Suppressing alerts programatically [\#2673](https://github.com/netdata/netdata/issues/2673) +- include chart values in alarm info text [\#2351](https://github.com/netdata/netdata/issues/2351) +- allow streamed data to be received on dedicated port [\#2149](https://github.com/netdata/netdata/issues/2149) 
+- alarm notifications should state a count of active alarms per state [\#946](https://github.com/netdata/netdata/issues/946) + +**Merged pull requests:** + +- update bug\_report.md [\#5205](https://github.com/netdata/netdata/pull/5205) ([ilyam8](https://github.com/ilyam8)) +- add missing modules to python.d.conf [\#5194](https://github.com/netdata/netdata/pull/5194) ([ilyam8](https://github.com/ilyam8)) +- remove double 'afraid to' in CONTRIBUTING.md [\#5189](https://github.com/netdata/netdata/pull/5189) ([arkamar](https://github.com/arkamar)) +- Use tarballs from GCS in kickstart.sh [\#5185](https://github.com/netdata/netdata/pull/5185) ([paulfantom](https://github.com/paulfantom)) +- fix for fireqos classname not showing [\#5176](https://github.com/netdata/netdata/pull/5176) ([psychomelet](https://github.com/psychomelet)) +- GCS-based updater [\#5174](https://github.com/netdata/netdata/pull/5174) ([paulfantom](https://github.com/paulfantom)) +- Updated Polyverse reinstall commands in Dockerfile [\#5173](https://github.com/netdata/netdata/pull/5173) ([archisgore](https://github.com/archisgore)) +- Change how the ip address and port are logged in socket.c [\#5166](https://github.com/netdata/netdata/pull/5166) ([krinfels](https://github.com/krinfels)) +- Correct SNMP module name in plugin error handling [\#5153](https://github.com/netdata/netdata/pull/5153) ([pablerass](https://github.com/pablerass)) +- Fix cached memory calculation [\#5151](https://github.com/netdata/netdata/pull/5151) ([vlvkobal](https://github.com/vlvkobal)) +- Fix typo in plugins.d/README.md [\#5150](https://github.com/netdata/netdata/pull/5150) ([arkamar](https://github.com/arkamar)) +- "Network Traffic \(system.net\)" is always zero on FreeBSD virtual machines if hypervisor uses VirtIO NIC [\#5149](https://github.com/netdata/netdata/pull/5149) ([vladmovchan](https://github.com/vladmovchan)) +- rabbitmq: api/nodes requests fix [\#5142](https://github.com/netdata/netdata/pull/5142) ([ilyam8](https://github.com/ilyam8)) +- go\_expavar fix: don't check for duplicate expvars [\#5141](https://github.com/netdata/netdata/pull/5141) ([ilyam8](https://github.com/ilyam8)) +- hddtemp fix: don't use disk model as dim name [\#5140](https://github.com/netdata/netdata/pull/5140) ([ilyam8](https://github.com/ilyam8)) +- add option to opt-out from telemetry program [\#5138](https://github.com/netdata/netdata/pull/5138) ([paulfantom](https://github.com/paulfantom)) +- Scramble packages in docker images with polymorphic linux [\#5137](https://github.com/netdata/netdata/pull/5137) ([paulfantom](https://github.com/paulfantom)) +- change ownership of .gitignore [\#5131](https://github.com/netdata/netdata/pull/5131) ([paulfantom](https://github.com/paulfantom)) +- Update Charts.md [\#5124](https://github.com/netdata/netdata/pull/5124) ([mfundul](https://github.com/mfundul)) +- self-contained uninstaller [\#5121](https://github.com/netdata/netdata/pull/5121) ([paulfantom](https://github.com/paulfantom)) +- force git describe to always create a version [\#5119](https://github.com/netdata/netdata/pull/5119) ([paulfantom](https://github.com/paulfantom)) +- Clarify backend modes of operation [\#5116](https://github.com/netdata/netdata/pull/5116) ([cakrit](https://github.com/cakrit)) +- web-site content; why-netdata content [\#5097](https://github.com/netdata/netdata/pull/5097) ([ktsaou](https://github.com/ktsaou)) +- Add variables to alarm-notify.sh [\#5096](https://github.com/netdata/netdata/pull/5096) ([cakrit](https://github.com/cakrit)) +- 
do not report stale metrics to prometheus [\#5084](https://github.com/netdata/netdata/pull/5084) ([ktsaou](https://github.com/ktsaou)) +- Unify versioning [\#5051](https://github.com/netdata/netdata/pull/5051) ([paulfantom](https://github.com/paulfantom)) +- Port ACLs, Management API and Health commands [\#4969](https://github.com/netdata/netdata/pull/4969) ([cakrit](https://github.com/cakrit)) +- Generate a configure script for RPM build \(\#4570\) [\#4571](https://github.com/netdata/netdata/pull/4571) ([ananace](https://github.com/ananace)) + +## [v1.12.0-rc2](https://github.com/netdata/netdata/tree/v1.12.0-rc2) (2019-01-03) + +**Fixed bugs:** + +- smartd\_log: check\(\) unhandled exception: list index out of range [\#5079](https://github.com/netdata/netdata/issues/5079) +- Additional character in Counter64 hex string [\#5028](https://github.com/netdata/netdata/issues/5028) +- Error every second PLUGIN\[proc\] [\#4994](https://github.com/netdata/netdata/issues/4994) +- Inconsistency in netdata.spec.in when comparing logdir permission with git-installation [\#4963](https://github.com/netdata/netdata/issues/4963) +- Docker-compose: a lot of errors; Connection refused, Can't establish connection to MySQL... [\#4956](https://github.com/netdata/netdata/issues/4956) +- Log flooding with new proc plugin [\#4945](https://github.com/netdata/netdata/issues/4945) +- Free memory shows as 'inactive' in FreeBSD [\#4737](https://github.com/netdata/netdata/issues/4737) +- Should use IEC-compliant abbreviations, e.g. KiB, MiB, etc. [\#4711](https://github.com/netdata/netdata/issues/4711) +- FreeBSD: \(apps.cpu\) not show a specific program [\#4037](https://github.com/netdata/netdata/issues/4037) +- Apcupsd: Connection loss further collects data, but it should stop [\#3927](https://github.com/netdata/netdata/issues/3927) +- FreeBSD: apps.plugin reports spikes and apps.cpu less user CPU [\#3245](https://github.com/netdata/netdata/issues/3245) + +**Closed issues:** + +- disable respect `Retry-After` response header in python UrlService by default [\#5078](https://github.com/netdata/netdata/issues/5078) +- move freeradius module to go.d [\#5063](https://github.com/netdata/netdata/issues/5063) +- move python module dns\_query\_time to go.d [\#5047](https://github.com/netdata/netdata/issues/5047) +- move python module web\_log to go.d [\#5046](https://github.com/netdata/netdata/issues/5046) +- R&D: Collectors landscape page [\#5045](https://github.com/netdata/netdata/issues/5045) +- Copy updater script instead of linking it [\#4924](https://github.com/netdata/netdata/issues/4924) +- Activemq monitoring [\#4818](https://github.com/netdata/netdata/issues/4818) +- Move packaging related code into `packaging/` directory [\#4611](https://github.com/netdata/netdata/issues/4611) +- Simplify makeself [\#4527](https://github.com/netdata/netdata/issues/4527) +- new netdata logo [\#4476](https://github.com/netdata/netdata/issues/4476) +- Add info on disabling alarms for specific target - part 2 [\#4324](https://github.com/netdata/netdata/issues/4324) +- Add info on disabling alarms for specific target - part 1 [\#4323](https://github.com/netdata/netdata/issues/4323) +- Document how to monitor log files [\#4318](https://github.com/netdata/netdata/issues/4318) +- Solr monitoring [\#3218](https://github.com/netdata/netdata/issues/3218) + +**Merged pull requests:** + +- postgres : fix WAL query [\#5105](https://github.com/netdata/netdata/pull/5105) ([anayrat](https://github.com/anayrat)) +- Correct memory usage statement in 
memory=none [\#5100](https://github.com/netdata/netdata/pull/5100) ([cakrit](https://github.com/cakrit)) +- fix permissions for log files when building rpms [\#5099](https://github.com/netdata/netdata/pull/5099) ([paulfantom](https://github.com/paulfantom)) +- fix web site install link [\#5092](https://github.com/netdata/netdata/pull/5092) ([ktsaou](https://github.com/ktsaou)) +- Removed c3, morris and raphael JS libraries \(\#5086\) [\#5088](https://github.com/netdata/netdata/pull/5088) ([gmosx](https://github.com/gmosx)) +- Improve instructions on how to view the slave UI [\#5083](https://github.com/netdata/netdata/pull/5083) ([cakrit](https://github.com/cakrit)) +- UrlService dont respect Retry-After header by default [\#5082](https://github.com/netdata/netdata/pull/5082) ([ilyam8](https://github.com/ilyam8)) +- smartd\_log: skip non-CSVs early [\#5081](https://github.com/netdata/netdata/pull/5081) ([kevlar1818](https://github.com/kevlar1818)) +- Dashboard grammar change [\#5080](https://github.com/netdata/netdata/pull/5080) ([Xalaxis](https://github.com/Xalaxis)) +- Add systemd pattern list parameter to the documentation [\#5077](https://github.com/netdata/netdata/pull/5077) ([vlvkobal](https://github.com/vlvkobal)) +- Fix update instructions URL in frontend [\#5076](https://github.com/netdata/netdata/pull/5076) ([jorisvervuurt](https://github.com/jorisvervuurt)) +- Add how to add new alarm [\#5069](https://github.com/netdata/netdata/pull/5069) ([cakrit](https://github.com/cakrit)) +- Fix cpuidle statistics in containers [\#5065](https://github.com/netdata/netdata/pull/5065) ([vlvkobal](https://github.com/vlvkobal)) +- Fix coverity issues [\#5061](https://github.com/netdata/netdata/pull/5061) ([vlvkobal](https://github.com/vlvkobal)) +- Disable cpuidle module if schedstat file is missing [\#5059](https://github.com/netdata/netdata/pull/5059) ([vlvkobal](https://github.com/vlvkobal)) +- Fixed typo [\#5054](https://github.com/netdata/netdata/pull/5054) ([samnela](https://github.com/samnela)) +- New option clear\_alarm\_always [\#5050](https://github.com/netdata/netdata/pull/5050) ([dex4er](https://github.com/dex4er)) +- fix IEC units in bash modules [\#5049](https://github.com/netdata/netdata/pull/5049) ([paulfantom](https://github.com/paulfantom)) +- Gracefully ignore the offset if the value is not a number [\#5040](https://github.com/netdata/netdata/pull/5040) ([cakrit](https://github.com/cakrit)) +- Fix process statistics collection for FreeBSD in apps.plugin [\#5038](https://github.com/netdata/netdata/pull/5038) ([vlvkobal](https://github.com/vlvkobal)) +- Apcupsd add check for UPS online [\#5037](https://github.com/netdata/netdata/pull/5037) ([cakrit](https://github.com/cakrit)) +- Add warning for offset in Counter64 metrics [\#5032](https://github.com/netdata/netdata/pull/5032) ([cakrit](https://github.com/cakrit)) +- Add other web servers to proxy instructions [\#5027](https://github.com/netdata/netdata/pull/5027) ([cakrit](https://github.com/cakrit)) +- copy updater script instead of linking it [\#5010](https://github.com/netdata/netdata/pull/5010) ([paulfantom](https://github.com/paulfantom)) + +## [v1.12.0-rc1](https://github.com/netdata/netdata/tree/v1.12.0-rc1) (2018-12-19) + +**Fixed bugs:** + +- mdstat module causing netdata segv and crash [\#4990](https://github.com/netdata/netdata/issues/4990) +- Cannot read /proc/mdstat line. Expected 7 params, read 6. 
[\#4975](https://github.com/netdata/netdata/issues/4975) +- custom notification method does not work [\#4968](https://github.com/netdata/netdata/issues/4968) +- Info logging command in netdata-updater.sh contains command substitution. [\#4950](https://github.com/netdata/netdata/issues/4950) +- No data in charts [\#4920](https://github.com/netdata/netdata/issues/4920) +- Postgres module: detect servers version and use the right query [\#4910](https://github.com/netdata/netdata/issues/4910) +- Uninstaller script is always interactive [\#4791](https://github.com/netdata/netdata/issues/4791) +- Cannot update & cannot disable mail logging of events [\#4557](https://github.com/netdata/netdata/issues/4557) +- web\_log plugin cannot handle high load traffic [\#4354](https://github.com/netdata/netdata/issues/4354) +- \[bug\]some metrics don't report to /allmetrics endpoint with prometheus format [\#3866](https://github.com/netdata/netdata/issues/3866) + +**Closed issues:** + +- move python module portcheck to go.d [\#5005](https://github.com/netdata/netdata/issues/5005) +- move python module httpcheck to go.d [\#5004](https://github.com/netdata/netdata/issues/5004) +- move python module lighttpd to go.d [\#5003](https://github.com/netdata/netdata/issues/5003) +- move python module rabbitmq to go.d [\#5002](https://github.com/netdata/netdata/issues/5002) +- move python module nginx to go.d [\#5001](https://github.com/netdata/netdata/issues/5001) +- move python module apache to go.d [\#5000](https://github.com/netdata/netdata/issues/5000) +- Pass cloud\_base\_url from netdata.conf to web/gui [\#4980](https://github.com/netdata/netdata/issues/4980) +- Improve configuration documentation [\#4781](https://github.com/netdata/netdata/issues/4781) +- Python.d.plugin infinite retries, ignore penalty, and plotting 'None' [\#4756](https://github.com/netdata/netdata/issues/4756) +- move `/proc` and `/sys` python modules to `proc` plugin [\#4541](https://github.com/netdata/netdata/issues/4541) +- mdstat RAID0 support [\#4010](https://github.com/netdata/netdata/issues/4010) +- FreeIPMI Plugin cant graph the wattage [\#3977](https://github.com/netdata/netdata/issues/3977) +- web\_log: charts per URL [\#3111](https://github.com/netdata/netdata/issues/3111) +- FQDN in alert sending [\#2477](https://github.com/netdata/netdata/issues/2477) +- on frontend, if JavaScript is disabled, there's no graceful degradation [\#2422](https://github.com/netdata/netdata/issues/2422) +- netdata dead but pid file exists [\#2266](https://github.com/netdata/netdata/issues/2266) + +**Merged pull requests:** + +- Non-interactive uninstaller [\#5021](https://github.com/netdata/netdata/pull/5021) ([paulfantom](https://github.com/paulfantom)) +- Kavenegar returns 200 [\#5020](https://github.com/netdata/netdata/pull/5020) ([salehi](https://github.com/salehi)) +- Fix missing method\_name: kavenegar [\#5019](https://github.com/netdata/netdata/pull/5019) ([salehi](https://github.com/salehi)) +- remove cross-directory dependency in build system [\#5012](https://github.com/netdata/netdata/pull/5012) ([paulfantom](https://github.com/paulfantom)) +- Move installer dir under packaging [\#5009](https://github.com/netdata/netdata/pull/5009) ([paulfantom](https://github.com/paulfantom)) +- Show a warning if JavaScript is disabled \#2422 [\#4999](https://github.com/netdata/netdata/pull/4999) ([gmosx](https://github.com/gmosx)) +- \[python\] make units compliant with IEC standard [\#4995](https://github.com/netdata/netdata/pull/4995) 
([ilyam8](https://github.com/ilyam8)) +- Integrate patches from freeipmi and set paramters [\#4993](https://github.com/netdata/netdata/pull/4993) ([Preisschild](https://github.com/Preisschild)) +- Fix crash in mdstat module [\#4992](https://github.com/netdata/netdata/pull/4992) ([vlvkobal](https://github.com/vlvkobal)) +- Update cgroup-name.sh.in [\#4991](https://github.com/netdata/netdata/pull/4991) ([n0coast](https://github.com/n0coast)) +- postgres timeouts [\#4988](https://github.com/netdata/netdata/pull/4988) ([ilyam8](https://github.com/ilyam8)) +- Make units compliant with IEC standard [\#4985](https://github.com/netdata/netdata/pull/4985) ([vlvkobal](https://github.com/vlvkobal)) +- Typo: `stab\_status` -\> `stub\_status` [\#4984](https://github.com/netdata/netdata/pull/4984) ([petecooper](https://github.com/petecooper)) +- Pass cloud\_base\_url from daemon to web/gui through hello endpoint \#4980 [\#4982](https://github.com/netdata/netdata/pull/4982) ([gmosx](https://github.com/gmosx)) +- Fix to \#4968, custom recipients were not working properly [\#4978](https://github.com/netdata/netdata/pull/4978) ([cakrit](https://github.com/cakrit)) +- Fix mdstat parsing [\#4977](https://github.com/netdata/netdata/pull/4977) ([vlvkobal](https://github.com/vlvkobal)) +- GCS access key shouldn't be encrypted [\#4976](https://github.com/netdata/netdata/pull/4976) ([paulfantom](https://github.com/paulfantom)) +- Fix accidentally changed file permissions [\#4974](https://github.com/netdata/netdata/pull/4974) ([vlvkobal](https://github.com/vlvkobal)) +- fix month 'Dec' being detected as IPv6 address in ovpn python.d plugin [\#4970](https://github.com/netdata/netdata/pull/4970) ([vpnable](https://github.com/vpnable)) +- Add support for Factorio server monitoring [\#4966](https://github.com/netdata/netdata/pull/4966) ([jonfairbanks](https://github.com/jonfairbanks)) +- Add mdstat to CMake configuration [\#4965](https://github.com/netdata/netdata/pull/4965) ([vlvkobal](https://github.com/vlvkobal)) +- Move power supply python module to proc plugin [\#4960](https://github.com/netdata/netdata/pull/4960) ([vlvkobal](https://github.com/vlvkobal)) +- dovecot readme update [\#4959](https://github.com/netdata/netdata/pull/4959) ([ilyam8](https://github.com/ilyam8)) +- Add cakrit to health codeowners [\#4953](https://github.com/netdata/netdata/pull/4953) ([cakrit](https://github.com/cakrit)) +- Prevent netdata-updater.sh from sending cron report for git stash entries [\#4952](https://github.com/netdata/netdata/pull/4952) ([cakrit](https://github.com/cakrit)) +- Temporary workaround for \#4945 [\#4951](https://github.com/netdata/netdata/pull/4951) ([cakrit](https://github.com/cakrit)) +- allow label modification [\#4949](https://github.com/netdata/netdata/pull/4949) ([paulfantom](https://github.com/paulfantom)) +- Fix link in streaming hosts list [\#4948](https://github.com/netdata/netdata/pull/4948) ([adherzog](https://github.com/adherzog)) +- Show demosite/host in GA for demo sites [\#4947](https://github.com/netdata/netdata/pull/4947) ([cakrit](https://github.com/cakrit)) +- Update GA in demosites.html [\#4946](https://github.com/netdata/netdata/pull/4946) ([cakrit](https://github.com/cakrit)) +- postgres fix: detect servers version and use the right query [\#4944](https://github.com/netdata/netdata/pull/4944) ([ilyam8](https://github.com/ilyam8)) +- Add support for providing FQDN in alarm notifications. 
[\#4943](https://github.com/netdata/netdata/pull/4943) ([Ferroin](https://github.com/Ferroin)) +- Add header to SMA webbox readme [\#4942](https://github.com/netdata/netdata/pull/4942) ([cakrit](https://github.com/cakrit)) +- Add doc before path to GA in static site [\#4940](https://github.com/netdata/netdata/pull/4940) ([cakrit](https://github.com/cakrit)) +- Add a Google Analytics tag to every markdown [\#4938](https://github.com/netdata/netdata/pull/4938) ([cakrit](https://github.com/cakrit)) +- Update README.md [\#4937](https://github.com/netdata/netdata/pull/4937) ([cakrit](https://github.com/cakrit)) +- python.d.plugin update [\#4936](https://github.com/netdata/netdata/pull/4936) ([ilyam8](https://github.com/ilyam8)) +- Update Performance.md [\#4935](https://github.com/netdata/netdata/pull/4935) ([cakrit](https://github.com/cakrit)) +- cleaner labeler code [\#4933](https://github.com/netdata/netdata/pull/4933) ([paulfantom](https://github.com/paulfantom)) +- use proper request types and urls to update labels [\#4931](https://github.com/netdata/netdata/pull/4931) ([paulfantom](https://github.com/paulfantom)) +- update code owners [\#4930](https://github.com/netdata/netdata/pull/4930) ([paulfantom](https://github.com/paulfantom)) +- Removed vlvkobal as a codeowner of web/gui [\#4929](https://github.com/netdata/netdata/pull/4929) ([gmosx](https://github.com/gmosx)) +- Add support for nonredundant arrays [\#4923](https://github.com/netdata/netdata/pull/4923) ([vlvkobal](https://github.com/vlvkobal)) +- Config docs improvements [\#4918](https://github.com/netdata/netdata/pull/4918) ([cakrit](https://github.com/cakrit)) +- Introduced IEC-compliant unit abbreviations \#4711 [\#4912](https://github.com/netdata/netdata/pull/4912) ([gmosx](https://github.com/gmosx)) + +## [v1.12.0-rc0](https://github.com/netdata/netdata/tree/v1.12.0-rc0) (2018-12-06) + +**Fixed bugs:** + +- nvidia\_smi module bug [\#4892](https://github.com/netdata/netdata/issues/4892) +- No alarms are running in some systems [\#4809](https://github.com/netdata/netdata/issues/4809) +- netdata-updater.sh cron report [\#4808](https://github.com/netdata/netdata/issues/4808) +- Netdata is not generating any alarms [\#4793](https://github.com/netdata/netdata/issues/4793) +- Fail2ban: Read "Restore Ban" for persistent bans [\#4769](https://github.com/netdata/netdata/issues/4769) +- Change in Incomming Webhooks Slack API breaks alerts [\#4755](https://github.com/netdata/netdata/issues/4755) +- registry items are clickable, but no action is taken [\#4721](https://github.com/netdata/netdata/issues/4721) +- Enable default alarms disabled after restart service netdata [\#4636](https://github.com/netdata/netdata/issues/4636) +- Spec file doesn't generate configure script before build [\#4570](https://github.com/netdata/netdata/issues/4570) +- sensors.chart.py ignores fans running at 0 RPM when netdata was started [\#4158](https://github.com/netdata/netdata/issues/4158) +- Postgres plugin lock output incorrect [\#4090](https://github.com/netdata/netdata/issues/4090) +- python plugins got behind by 5 seconds [\#3752](https://github.com/netdata/netdata/issues/3752) +- Constant stream of "chart took too long to be updated" INFO messages in error.log [\#3505](https://github.com/netdata/netdata/issues/3505) +- SNMP 64bit Counter Issue - Far from correct bandwidth values [\#3488](https://github.com/netdata/netdata/issues/3488) +- Update health reference documentation [\#3468](https://github.com/netdata/netdata/issues/3468) +- Alarm badge link 
escaping for disk paths in default dashboard [\#3253](https://github.com/netdata/netdata/issues/3253) + +**Closed issues:** + +- Docker netdata documentation [\#4899](https://github.com/netdata/netdata/issues/4899) +- Tiny Proxy monitoring [\#4834](https://github.com/netdata/netdata/issues/4834) +- Phusion Passenger monitoring [\#4833](https://github.com/netdata/netdata/issues/4833) +- Iis monitoring [\#4832](https://github.com/netdata/netdata/issues/4832) +- Scaleio monitoring [\#4828](https://github.com/netdata/netdata/issues/4828) +- Gluster monitoring [\#4827](https://github.com/netdata/netdata/issues/4827) +- Leofs monitoring [\#4826](https://github.com/netdata/netdata/issues/4826) +- Jumpy data when running on kubernetes [\#4778](https://github.com/netdata/netdata/issues/4778) +- Create documentation on how to opt-out of anonymous data collection [\#4746](https://github.com/netdata/netdata/issues/4746) +- Use `--future-release` in changelog generation [\#4718](https://github.com/netdata/netdata/issues/4718) +- requirements.txt in TLD are not related to netdata [\#4693](https://github.com/netdata/netdata/issues/4693) +- What is The Right Role for Netdata MongoDB Python Plugins? [\#4666](https://github.com/netdata/netdata/issues/4666) +- Store nightly build artifacts somewhere [\#4628](https://github.com/netdata/netdata/issues/4628) +- Remove old packaging scripts [\#4608](https://github.com/netdata/netdata/issues/4608) +- Use the new logo in web/gui [\#4598](https://github.com/netdata/netdata/issues/4598) +- Labelling bot [\#4528](https://github.com/netdata/netdata/issues/4528) +- Extract registry functionality from dashboard.js [\#4474](https://github.com/netdata/netdata/issues/4474) +- HTML Documentation [\#4439](https://github.com/netdata/netdata/issues/4439) +- New documentation structure [\#4321](https://github.com/netdata/netdata/issues/4321) +- Add instructions to debug alarm notifications [\#4319](https://github.com/netdata/netdata/issues/4319) +- Fix file classification in LGTM [\#4259](https://github.com/netdata/netdata/issues/4259) +- Add CONTRIBUTING.md [\#4146](https://github.com/netdata/netdata/issues/4146) +- Send alerts via Slack to a single user \(direct message\)? [\#3722](https://github.com/netdata/netdata/issues/3722) +- golang orchestrator [\#3589](https://github.com/netdata/netdata/issues/3589) +- \[web api\] Add /api/v1/version [\#3540](https://github.com/netdata/netdata/issues/3540) +- Feature: UKSM support [\#2994](https://github.com/netdata/netdata/issues/2994) +- web\_log reports unmatched lines [\#2295](https://github.com/netdata/netdata/issues/2295) +- Ceph support [\#1673](https://github.com/netdata/netdata/issues/1673) +- Misaligned option points of REST API v1 endpoint data [\#1628](https://github.com/netdata/netdata/issues/1628) +- Adding support for time markers [\#1195](https://github.com/netdata/netdata/issues/1195) +- Scheduled “downtime” for a type of check? [\#1133](https://github.com/netdata/netdata/issues/1133) +- split snmp.conf into several conf files possible? 
[\#1126](https://github.com/netdata/netdata/issues/1126) +- sensu/collectd integration [\#174](https://github.com/netdata/netdata/issues/174) + +**Merged pull requests:** + +- run shfmt on CI scripts [\#4928](https://github.com/netdata/netdata/pull/4928) ([paulfantom](https://github.com/paulfantom)) +- use relative path for logo [\#4927](https://github.com/netdata/netdata/pull/4927) ([ktsaou](https://github.com/ktsaou)) +- fix symbolic link file detection in etc [\#4926](https://github.com/netdata/netdata/pull/4926) ([ktsaou](https://github.com/ktsaou)) +- send all git log msg to fd3 [\#4922](https://github.com/netdata/netdata/pull/4922) ([paulfantom](https://github.com/paulfantom)) +- RabbitMQ chart for message rates should be "line" [\#4916](https://github.com/netdata/netdata/pull/4916) ([dex4er](https://github.com/dex4er)) +- better labeling [\#4915](https://github.com/netdata/netdata/pull/4915) ([paulfantom](https://github.com/paulfantom)) +- Improve docker installation readme, docs navbar fix [\#4914](https://github.com/netdata/netdata/pull/4914) ([cakrit](https://github.com/cakrit)) +- Use the new logo in the UI [\#4913](https://github.com/netdata/netdata/pull/4913) ([gmosx](https://github.com/gmosx)) +- fix info api method compilation warnings [\#4911](https://github.com/netdata/netdata/pull/4911) ([ktsaou](https://github.com/ktsaou)) +- smartd\_log: ata 194 attr fix [\#4908](https://github.com/netdata/netdata/pull/4908) ([ilyam8](https://github.com/ilyam8)) +- Do not update repositories in CI operating system [\#4907](https://github.com/netdata/netdata/pull/4907) ([paulfantom](https://github.com/paulfantom)) +- Don't use IE11 incompatible for-const \#4710 [\#4906](https://github.com/netdata/netdata/pull/4906) ([gmosx](https://github.com/gmosx)) +- Update python.d readme [\#4905](https://github.com/netdata/netdata/pull/4905) ([cakrit](https://github.com/cakrit)) +- do not use protected variable name in updater script [\#4902](https://github.com/netdata/netdata/pull/4902) ([paulfantom](https://github.com/paulfantom)) +- postgres module: locks count fix [\#4901](https://github.com/netdata/netdata/pull/4901) ([ilyam8](https://github.com/ilyam8)) +- treat DT\_UNKNOWN files as regular files [\#4898](https://github.com/netdata/netdata/pull/4898) ([ktsaou](https://github.com/ktsaou)) +- more health debugging to trace config files [\#4897](https://github.com/netdata/netdata/pull/4897) ([ktsaou](https://github.com/ktsaou)) +- added debug statements when loading health config files [\#4896](https://github.com/netdata/netdata/pull/4896) ([ktsaou](https://github.com/ktsaou)) +- Added info on health configuration and page for Charts [\#4895](https://github.com/netdata/netdata/pull/4895) ([cakrit](https://github.com/cakrit)) +- added more debug output to freeipmi [\#4894](https://github.com/netdata/netdata/pull/4894) ([ktsaou](https://github.com/ktsaou)) +- nvidia\_smi: handle `N/A` values [\#4893](https://github.com/netdata/netdata/pull/4893) ([ilyam8](https://github.com/ilyam8)) +- add api/v1/info endpoint to swagger [\#4807](https://github.com/netdata/netdata/pull/4807) ([Wing924](https://github.com/Wing924)) +- Update CONTRIBUTING.md [\#4805](https://github.com/netdata/netdata/pull/4805) ([cakrit](https://github.com/cakrit)) +- Add info from PR 208 [\#4804](https://github.com/netdata/netdata/pull/4804) ([cakrit](https://github.com/cakrit)) +- Anonymize IPs in README.md Google Analytics [\#4803](https://github.com/netdata/netdata/pull/4803) ([cakrit](https://github.com/cakrit)) +- Minor 
updates in htmldoc [\#4802](https://github.com/netdata/netdata/pull/4802) ([cakrit](https://github.com/cakrit)) +- Add cookie consent javascript to docs [\#4801](https://github.com/netdata/netdata/pull/4801) ([cakrit](https://github.com/cakrit)) +- Improve SYNPROXY documentation [\#4800](https://github.com/netdata/netdata/pull/4800) ([cakrit](https://github.com/cakrit)) +- Add debug instructions for python modules [\#4799](https://github.com/netdata/netdata/pull/4799) ([cakrit](https://github.com/cakrit)) +- Added Legal section to documentation, added missing link for apps.plugin [\#4797](https://github.com/netdata/netdata/pull/4797) ([cakrit](https://github.com/cakrit)) +- auto-label PRs and minor cleanup [\#4795](https://github.com/netdata/netdata/pull/4795) ([paulfantom](https://github.com/paulfantom)) +- automatic labeling of new features [\#4792](https://github.com/netdata/netdata/pull/4792) ([paulfantom](https://github.com/paulfantom)) +- Small content change to the netdata-installer.sh [\#4790](https://github.com/netdata/netdata/pull/4790) ([ei8fdb](https://github.com/ei8fdb)) +- lifecycle test [\#4789](https://github.com/netdata/netdata/pull/4789) ([paulfantom](https://github.com/paulfantom)) +- Documentation TOC bug fix [\#4787](https://github.com/netdata/netdata/pull/4787) ([cakrit](https://github.com/cakrit)) +- netdata-security doc corrections [\#4786](https://github.com/netdata/netdata/pull/4786) ([cakrit](https://github.com/cakrit)) +- Update README.md for release 1.11.1 [\#4777](https://github.com/netdata/netdata/pull/4777) ([taniaab](https://github.com/taniaab)) +- Fix typo in "Github Star" documentation [\#4776](https://github.com/netdata/netdata/pull/4776) ([josemaia](https://github.com/josemaia)) +- Added a few more debugging instructions for notifications [\#4774](https://github.com/netdata/netdata/pull/4774) ([cakrit](https://github.com/cakrit)) +- buildhtml.sh should exit with 1 if anything fails [\#4773](https://github.com/netdata/netdata/pull/4773) ([cakrit](https://github.com/cakrit)) +- fail2ban fix: add 'Restore Ban' action [\#4772](https://github.com/netdata/netdata/pull/4772) ([ilyam8](https://github.com/ilyam8)) +- add api/v1/info endpoint [\#4770](https://github.com/netdata/netdata/pull/4770) ([Wing924](https://github.com/Wing924)) +- Move mdstat python module to proc plugin [\#4768](https://github.com/netdata/netdata/pull/4768) ([vlvkobal](https://github.com/vlvkobal)) +- bugfix: query engine resampling duration [\#4759](https://github.com/netdata/netdata/pull/4759) ([ktsaou](https://github.com/ktsaou)) +- web\_log: add alarm on unmatched lines [\#4757](https://github.com/netdata/netdata/pull/4757) ([ilyam8](https://github.com/ilyam8)) +- sensors: don't ignore 0 RPM fans on start [\#4753](https://github.com/netdata/netdata/pull/4753) ([ilyam8](https://github.com/ilyam8)) +- Use var to make NETDATA variable global [\#4752](https://github.com/netdata/netdata/pull/4752) ([gmosx](https://github.com/gmosx)) +- move build Dockerfiles to external repo [\#4749](https://github.com/netdata/netdata/pull/4749) ([paulfantom](https://github.com/paulfantom)) +- remove rolling version suffix [\#4748](https://github.com/netdata/netdata/pull/4748) ([paulfantom](https://github.com/paulfantom)) +- Docs point to docs.netdata.cloud instead of wiki. 
Correct padding-bot… [\#4747](https://github.com/netdata/netdata/pull/4747) ([cakrit](https://github.com/cakrit)) +- Make Getting Started just a top level link [\#4740](https://github.com/netdata/netdata/pull/4740) ([cakrit](https://github.com/cakrit)) +- docker: correct invalid syntax [\#4738](https://github.com/netdata/netdata/pull/4738) ([paulfantom](https://github.com/paulfantom)) +- Make the whole title area clickable, closes \#4721 [\#4733](https://github.com/netdata/netdata/pull/4733) ([gmosx](https://github.com/gmosx)) +- Correctly apply B unit conversion [\#4724](https://github.com/netdata/netdata/pull/4724) ([gmosx](https://github.com/gmosx)) +- add more layers to container image [\#4722](https://github.com/netdata/netdata/pull/4722) ([paulfantom](https://github.com/paulfantom)) +- python.d: use real time for calc sinceLast [\#4720](https://github.com/netdata/netdata/pull/4720) ([ilyam8](https://github.com/ilyam8)) +- stricter use of URL separators [\#4716](https://github.com/netdata/netdata/pull/4716) ([ktsaou](https://github.com/ktsaou)) +- Test integrity of dashboard.js [\#4715](https://github.com/netdata/netdata/pull/4715) ([paulfantom](https://github.com/paulfantom)) +- fix\(pagerduty\): Use cURL instead of PagerDuty agent to send alarms. [\#4694](https://github.com/netdata/netdata/pull/4694) ([elisiariocouto](https://github.com/elisiariocouto)) +- lint all shell collectors code [\#4690](https://github.com/netdata/netdata/pull/4690) ([paulfantom](https://github.com/paulfantom)) +- Move cpuidle python module to proc plugin [\#4635](https://github.com/netdata/netdata/pull/4635) ([vlvkobal](https://github.com/vlvkobal)) +- Cleanup docker packaging and contrib [\#4627](https://github.com/netdata/netdata/pull/4627) ([paulfantom](https://github.com/paulfantom)) +- Better updater [\#4558](https://github.com/netdata/netdata/pull/4558) ([paulfantom](https://github.com/paulfantom)) +- Generalize the recipient finding logic and reduce the boilerplate code. 
[\#3960](https://github.com/netdata/netdata/pull/3960) ([Ferroin](https://github.com/Ferroin)) +- RPM spec and patches for sles 11 [\#3708](https://github.com/netdata/netdata/pull/3708) ([veksh](https://github.com/veksh)) + +## [v1.11.1](https://github.com/netdata/netdata/tree/v1.11.1) (2018-11-22) + +**Fixed bugs:** + +- Sensors module of python plugin not working \(again?\) [\#4692](https://github.com/netdata/netdata/issues/4692) +- Ubuntu 18.04 apt package is still on v1.9.0, though apt is the recommended installation method [\#4675](https://github.com/netdata/netdata/issues/4675) +- pre-built static binary install script does not detect SLES as systemd OS [\#4641](https://github.com/netdata/netdata/issues/4641) +- Sensors don't work [\#4602](https://github.com/netdata/netdata/issues/4602) +- smartd\_log check\(\) unhandled exception: 'list' object has no attribute 'clear' [\#4583](https://github.com/netdata/netdata/issues/4583) +- 1m\_received\_traffic\_overflow alarm is faulty on 10G or 40G network interfaces [\#4577](https://github.com/netdata/netdata/issues/4577) +- 1.11 release reports as 1.10.0\_rolling [\#4572](https://github.com/netdata/netdata/issues/4572) +- update netdata error [\#4560](https://github.com/netdata/netdata/issues/4560) +- edit-config uses vi, even if it isn't the system editor [\#4549](https://github.com/netdata/netdata/issues/4549) +- inbound packets dropped inbound [\#4536](https://github.com/netdata/netdata/issues/4536) +- incremental chart algorithm doesn't handle counter wrap properly [\#4533](https://github.com/netdata/netdata/issues/4533) +- Disk full \(inodes\) due to netdata [\#4518](https://github.com/netdata/netdata/issues/4518) +- Systemd not working on Ubuntu 14.04 [\#4465](https://github.com/netdata/netdata/issues/4465) +- Links on the wiki are returning 404s [\#4408](https://github.com/netdata/netdata/issues/4408) +- It figures [\#4184](https://github.com/netdata/netdata/issues/4184) +- netdata stream clients disconnecting from netdata server [\#4049](https://github.com/netdata/netdata/issues/4049) +- False positive alarm for RAM [\#4013](https://github.com/netdata/netdata/issues/4013) +- Occasional rm "cannot remove" on netdata-updater [\#3457](https://github.com/netdata/netdata/issues/3457) +- opensuse - installation by hand issues due to hardcoded libexec in netdata-installer.sh [\#3346](https://github.com/netdata/netdata/issues/3346) +- Netdata Installation failed in Manjaro(Arch)Latest [\#2812](https://github.com/netdata/netdata/issues/2812) +- undefined applications show up in system category? 
[\#2385](https://github.com/netdata/netdata/issues/2385) +- memory mode map initialization slow when database is too big [\#2382](https://github.com/netdata/netdata/issues/2382) +- Long hostnames cause alignment issues in my-netdata [\#2335](https://github.com/netdata/netdata/issues/2335) +- don't get snmp running properly [\#1734](https://github.com/netdata/netdata/issues/1734) +- Plugins continue to log to old error.log after a SIGHUP [\#805](https://github.com/netdata/netdata/issues/805) + +**Closed issues:** + +- Improve footer of web/gui [\#4708](https://github.com/netdata/netdata/issues/4708) +- Ignores EMAIL\_SENDER [\#4695](https://github.com/netdata/netdata/issues/4695) +- Add option to do pre-releases in GitHub [\#4684](https://github.com/netdata/netdata/issues/4684) +- Invalid links in \*.md files [\#4672](https://github.com/netdata/netdata/issues/4672) +- Replace all wiki links with repo links in netdata files [\#4650](https://github.com/netdata/netdata/issues/4650) +- Replace http URLs with https in markdown files [\#4626](https://github.com/netdata/netdata/issues/4626) +- Extract JS and CSS from index.html [\#4586](https://github.com/netdata/netdata/issues/4586) +- Improved management of netdata urls in the `my-netdata` menu [\#4582](https://github.com/netdata/netdata/issues/4582) +- Ignore web/gui/src in LGTM and Codacy checks. [\#4516](https://github.com/netdata/netdata/issues/4516) +- Remove excessive requestAnimationFrame\(\) compatibility checks [\#4501](https://github.com/netdata/netdata/issues/4501) +- Remove obsolete chart renderers [\#4492](https://github.com/netdata/netdata/issues/4492) +- Split dashboard.js into multiple files [\#4479](https://github.com/netdata/netdata/issues/4479) +- Hdd temperature monitoring on FreeBSD [\#4463](https://github.com/netdata/netdata/issues/4463) +- Modernize dashboard.js [\#4461](https://github.com/netdata/netdata/issues/4461) +- Documentation links sanity checker [\#4416](https://github.com/netdata/netdata/issues/4416) +- Write a blog entry about monitoring and performance tuning mysql with netdata [\#4326](https://github.com/netdata/netdata/issues/4326) +- Document supported python versions [\#4322](https://github.com/netdata/netdata/issues/4322) +- Add coverity scans to Travis [\#4248](https://github.com/netdata/netdata/issues/4248) +- Lint all shell scripts [\#4166](https://github.com/netdata/netdata/issues/4166) +- Include tests in CI pipeline [\#4133](https://github.com/netdata/netdata/issues/4133) +- Runfile installation doesn't fix earlier incorrect netdata init script [\#4009](https://github.com/netdata/netdata/issues/4009) +- http://IP:19999/lib/bootstrap-3.3.7.min.js [\#3908](https://github.com/netdata/netdata/issues/3908) +- Netdata - Spring boot plugin [\#2074](https://github.com/netdata/netdata/issues/2074) +- support standard deviation in reduce functions [\#808](https://github.com/netdata/netdata/issues/808) +- web server optimization [\#532](https://github.com/netdata/netdata/issues/532) +- Containers: running plugins in different namespaces to allow netdata collect application metrics from containers [\#474](https://github.com/netdata/netdata/issues/474) + +**Merged pull requests:** + +- Cleanup of web/gui footer [\#4709](https://github.com/netdata/netdata/pull/4709) ([gmosx](https://github.com/gmosx)) +- added byte unit scaling [\#4707](https://github.com/netdata/netdata/pull/4707) ([AndCycle](https://github.com/AndCycle)) +- Add missing quote to tc-qos-helper.sh.in 
[\#4703](https://github.com/netdata/netdata/pull/4703) ([drwtsn32x](https://github.com/drwtsn32x)) +- Fix typo and py2 compatibility issue. [\#4697](https://github.com/netdata/netdata/pull/4697) ([Ferroin](https://github.com/Ferroin)) +- Update Doc links for adding charts and alarms in sidebar. Issue \#4650 [\#4669](https://github.com/netdata/netdata/pull/4669) ([nekkabcire](https://github.com/nekkabcire)) +- Update lm\_sensors and catch specific errors. [\#4667](https://github.com/netdata/netdata/pull/4667) ([Ferroin](https://github.com/Ferroin)) +- Remove left over code [\#4662](https://github.com/netdata/netdata/pull/4662) ([xPaw](https://github.com/xPaw)) +- Fix changelog path, add all README.md files to Debian package doc [\#4657](https://github.com/netdata/netdata/pull/4657) ([runejuhl](https://github.com/runejuhl)) +- properly parse network interface names with colon on them [\#4653](https://github.com/netdata/netdata/pull/4653) ([ktsaou](https://github.com/ktsaou)) +- sensors module fix [\#4651](https://github.com/netdata/netdata/pull/4651) ([ilyam8](https://github.com/ilyam8)) +- Update installer/functions.sh [\#4643](https://github.com/netdata/netdata/pull/4643) ([tsingletonacic](https://github.com/tsingletonacic)) +- Fix documentation in beanstalk.conf. [\#4639](https://github.com/netdata/netdata/pull/4639) ([Ferroin](https://github.com/Ferroin)) +- Minor cleanup of main.js [\#4634](https://github.com/netdata/netdata/pull/4634) ([gmosx](https://github.com/gmosx)) +- Fixed tc-helper plugin broken link [\#4617](https://github.com/netdata/netdata/pull/4617) ([ofirule](https://github.com/ofirule)) +- Another Readme Update [\#4612](https://github.com/netdata/netdata/pull/4612) ([ktsaou](https://github.com/ktsaou)) +- Fix spelling mistake in dashboard\_info.js [\#4601](https://github.com/netdata/netdata/pull/4601) ([hotio](https://github.com/hotio)) +- bug fix: conntrack\_max alarm was accessing invalid variable [\#4595](https://github.com/netdata/netdata/pull/4595) ([ktsaou](https://github.com/ktsaou)) +- fixed max interface speed calculation [\#4594](https://github.com/netdata/netdata/pull/4594) ([ktsaou](https://github.com/ktsaou)) +- Issue 4582 \(Show alternate urls in my-netdata menu\) [\#4590](https://github.com/netdata/netdata/pull/4590) ([gmosx](https://github.com/gmosx)) +- nvidia\_smi: init version added [\#4589](https://github.com/netdata/netdata/pull/4589) ([ilyam8](https://github.com/ilyam8)) +- smartd\_log: py2 compatibility fix [\#4584](https://github.com/netdata/netdata/pull/4584) ([ilyam8](https://github.com/ilyam8)) +- Split js 2 [\#4581](https://github.com/netdata/netdata/pull/4581) ([gmosx](https://github.com/gmosx)) +- Alerta.io notification improvements [\#4576](https://github.com/netdata/netdata/pull/4576) ([satterly](https://github.com/satterly)) +- netdata-openrc: Move check from depends\(\) to start\_pre\(\) [\#4575](https://github.com/netdata/netdata/pull/4575) ([aadityabagga](https://github.com/aadityabagga)) +- Fix badges link that leads to 404. 
[\#4569](https://github.com/netdata/netdata/pull/4569) ([nekkabcire](https://github.com/nekkabcire)) +- Move cpufreq python module to proc plugin [\#4562](https://github.com/netdata/netdata/pull/4562) ([vlvkobal](https://github.com/vlvkobal)) +- decouple nightly cron jobs from packaging stage [\#4559](https://github.com/netdata/netdata/pull/4559) ([paulfantom](https://github.com/paulfantom)) +- Clarify application configuration and fix broken link [\#4554](https://github.com/netdata/netdata/pull/4554) ([JBaczuk](https://github.com/JBaczuk)) +- edit-config: Better support for custom editors. [\#4551](https://github.com/netdata/netdata/pull/4551) ([Ferroin](https://github.com/Ferroin)) +- add tor python module [\#4546](https://github.com/netdata/netdata/pull/4546) ([ilyam8](https://github.com/ilyam8)) +- incremental overflows should not show zero values [\#4538](https://github.com/netdata/netdata/pull/4538) ([ktsaou](https://github.com/ktsaou)) +- smartd\_log refactor plus SCSI support [\#4523](https://github.com/netdata/netdata/pull/4523) ([ilyam8](https://github.com/ilyam8)) +- openldap monitoring plugin added [\#4513](https://github.com/netdata/netdata/pull/4513) ([ekartsonakis](https://github.com/ekartsonakis)) +- Refactoring dashboard.js, splitting monolithic file into multiple source files. [\#4496](https://github.com/netdata/netdata/pull/4496) ([gmosx](https://github.com/gmosx)) +- Switch e-mail threading to be enabled by default. [\#3780](https://github.com/netdata/netdata/pull/3780) ([Ferroin](https://github.com/Ferroin)) + +## [v1.11.0](https://github.com/netdata/netdata/tree/v1.11.0) (2018-11-02) + +**Fixed bugs:** + +- Cannot use oidname in snmp config [\#4512](https://github.com/netdata/netdata/issues/4512) +- config.status: error: cannot find input file: `web/api/badges/Makefile.in' [\#4502](https://github.com/netdata/netdata/issues/4502) +- Diskspace plugin accesses excluded filesystem and stalls netdata process [\#4491](https://github.com/netdata/netdata/issues/4491) +- netdata allocates 170MB memory after startup \(without the database\) [\#4487](https://github.com/netdata/netdata/issues/4487) +- Logcheck security alert: netdata : command not allowed ; TTY=unknown ; PWD=/etc/netdata ; USER=root ; COMMAND=validate [\#4473](https://github.com/netdata/netdata/issues/4473) +- duplicate name in cgroup if dash present in container name [\#4468](https://github.com/netdata/netdata/issues/4468) +- Wrong logos in infographic [\#4455](https://github.com/netdata/netdata/issues/4455) +- Netdata in Docker cannot load stock config \(permission denied\) [\#4453](https://github.com/netdata/netdata/issues/4453) +- Icecast module not working [\#4432](https://github.com/netdata/netdata/issues/4432) +- Installer does not detect systemd on Ubuntu 14.04 [\#4421](https://github.com/netdata/netdata/issues/4421) +- netdata.spec seems to reference missing files [\#4409](https://github.com/netdata/netdata/issues/4409) +- mongodb.chart.py does not check pymongo version [\#4407](https://github.com/netdata/netdata/issues/4407) +- node.d.plugin issue after modularizing plugins commit [\#4406](https://github.com/netdata/netdata/issues/4406) +- netdata: CONFIG: cannot load user config ‘/etc/netdata/stream.conf’. Will try stock config. 
[\#4403](https://github.com/netdata/netdata/issues/4403) +- netdata \(20181015\) compiles fine but 'make dist' aborts [\#4400](https://github.com/netdata/netdata/issues/4400) +- netdata does not compile on FreeBSD 11.2-RELEASE-p4 [\#4393](https://github.com/netdata/netdata/issues/4393) +- API documentation cannot be read [\#4371](https://github.com/netdata/netdata/issues/4371) +- Error message: Cannot open file stream.conf [\#4341](https://github.com/netdata/netdata/issues/4341) +- MegaCli Plugin fails to parse [\#4278](https://github.com/netdata/netdata/issues/4278) +- Apps plugin: wrong open\_sockets counter when fd type changes [\#4233](https://github.com/netdata/netdata/issues/4233) +- Logind bug [\#4230](https://github.com/netdata/netdata/issues/4230) +- Should netdata identify the js binary as NodeJS by default? [\#4217](https://github.com/netdata/netdata/issues/4217) +- Cannot load jQuery: ERROR 101 [\#4212](https://github.com/netdata/netdata/issues/4212) +- redis.chart.py stops with error "check\(\) unhandled exception: 'rdb\_bgsave\_in\_progress'" [\#4204](https://github.com/netdata/netdata/issues/4204) +- Failed to start netdata.service: Exec format error [\#4169](https://github.com/netdata/netdata/issues/4169) +- python clocks don't work under FreeBSD [\#4152](https://github.com/netdata/netdata/issues/4152) +- error: cannot take the address of an rvalue of type 'FILE \*' when building on OpenBSD [\#4145](https://github.com/netdata/netdata/issues/4145) +- packages installer failed [\#4119](https://github.com/netdata/netdata/issues/4119) +- update\_every in postgres plugin [\#4089](https://github.com/netdata/netdata/issues/4089) +- /proc/interrupts plugin memory leak [\#4051](https://github.com/netdata/netdata/issues/4051) +- Problem with logrotate config \(PID discovery\) [\#4020](https://github.com/netdata/netdata/issues/4020) +- \[SECURITY\] Mitigate CVE-2017-18342 [\#4012](https://github.com/netdata/netdata/issues/4012) +- Netdata looks in ../../../../ to get its config [\#3988](https://github.com/netdata/netdata/issues/3988) +- Statsd counters/gauges stuck on -167,772,150,000,000 [\#3978](https://github.com/netdata/netdata/issues/3978) +- After netdata slave is rebooted, timestamp doesn't match [\#3966](https://github.com/netdata/netdata/issues/3966) +- netdata-updater.sh fails due to missing './' on 'netdata-installer.sh' line [\#3940](https://github.com/netdata/netdata/issues/3940) +- Problem with running any python.d plugin? 
[\#3854](https://github.com/netdata/netdata/issues/3854) +- Alert email syntax problem [\#3843](https://github.com/netdata/netdata/issues/3843) +- kickstart-static64.sh fails with sh as root [\#3840](https://github.com/netdata/netdata/issues/3840) +- Illegal characters in URLs [\#3819](https://github.com/netdata/netdata/issues/3819) +- Use bash loadable sleep in tc-qos-helper.sh [\#3754](https://github.com/netdata/netdata/issues/3754) +- btrfs shows wrong disk space when the filesystem has sector size 4k but the logical disk sector size is 512B [\#3746](https://github.com/netdata/netdata/issues/3746) +- ERROR 405 with squid and web\_logs plugin [\#3738](https://github.com/netdata/netdata/issues/3738) +- tcp listen alarm integer expression expected [\#3733](https://github.com/netdata/netdata/issues/3733) +- Cannot load required JS library: http://ipaddress:19999/dashboard\_info.js?v20180510-2 after update or fresh install [\#3707](https://github.com/netdata/netdata/issues/3707) +- IPv4 UDPLite stats are always visible, even if UDPLite is not used on a system [\#3706](https://github.com/netdata/netdata/issues/3706) +- When listening on a unix socket, web server still attempts to set TCP\_NODELAY. [\#3682](https://github.com/netdata/netdata/issues/3682) +- httpcheck do not accept URLs that do not end with com [\#3656](https://github.com/netdata/netdata/issues/3656) +- httpcheck python.d plugin fails [\#3641](https://github.com/netdata/netdata/issues/3641) +- Issue with statsd sample rate [\#3630](https://github.com/netdata/netdata/issues/3630) +- NetData and Kubernetes - Docker Name [\#3369](https://github.com/netdata/netdata/issues/3369) +- netdata-uninstaller.sh not working \(with macOS 10.13\) [\#2941](https://github.com/netdata/netdata/issues/2941) +- Problem with plugins in debug mode \(wrong path to cfgs\) [\#2593](https://github.com/netdata/netdata/issues/2593) +- dashboard with thousands of charts [\#2275](https://github.com/netdata/netdata/issues/2275) +- fix docker image tagging problem [\#4250](https://github.com/netdata/netdata/pull/4250) ([paulfantom](https://github.com/paulfantom)) + +**Closed issues:** + +- Feature request: Support for Adaptec RAID [\#4396](https://github.com/netdata/netdata/issues/4396) +- Is there any way to disable the example chart? [\#4384](https://github.com/netdata/netdata/issues/4384) +- modularize c source [\#4339](https://github.com/netdata/netdata/issues/4339) +- Diff migration of Wiki updates to new documentation [\#4320](https://github.com/netdata/netdata/issues/4320) +- Change GPL-3.0+ to GPL-3.0-or-later in all SPDX headers [\#4274](https://github.com/netdata/netdata/issues/4274) +- How to stop some metrics to save bandwidth [\#4223](https://github.com/netdata/netdata/issues/4223) +- UTC Timezone [\#4202](https://github.com/netdata/netdata/issues/4202) +- stock config files should be in `/usr/lib/netdata/` [\#4182](https://github.com/netdata/netdata/issues/4182) +- Lint python code \(PEP8 standard\) [\#4167](https://github.com/netdata/netdata/issues/4167) +- Fail2ban'd IPv6 addresses are not processed [\#4144](https://github.com/netdata/netdata/issues/4144) +- httpcheck support for HTTP methods \(e.g. 
GET, OPTIONS, HEAD, etc...\) [\#4127](https://github.com/netdata/netdata/issues/4127) +- Naming for Diskstats for Veritas Volume Manager disks [\#4116](https://github.com/netdata/netdata/issues/4116) +- Raise an alarm when a docker container is unhealthy [\#4111](https://github.com/netdata/netdata/issues/4111) +- elasticsearch plugin python json exception if another service running on port 9200 [\#4092](https://github.com/netdata/netdata/issues/4092) +- varnish 5 support [\#4073](https://github.com/netdata/netdata/issues/4073) +- 'Other' is the Largest Category Under Applications \> Mem due to Node processes [\#4063](https://github.com/netdata/netdata/issues/4063) +- send netdata health monitoring variables to backends [\#4035](https://github.com/netdata/netdata/issues/4035) +- Badges - seconds units [\#4029](https://github.com/netdata/netdata/issues/4029) +- Web\_log doesn't support response times in nanoseconds [\#4003](https://github.com/netdata/netdata/issues/4003) +- 400 error when netdata tries to send slack notification [\#3989](https://github.com/netdata/netdata/issues/3989) +- Disable probing device mapper [\#3974](https://github.com/netdata/netdata/issues/3974) +- MySQL Python Plugin not work [\#3968](https://github.com/netdata/netdata/issues/3968) +- How to enable sensor plugin? [\#3953](https://github.com/netdata/netdata/issues/3953) +- netdata does not appear to send host tags via graphite backend [\#3936](https://github.com/netdata/netdata/issues/3936) +- Netdata breaks suspend in debian stretch [\#3842](https://github.com/netdata/netdata/issues/3842) +- NUT ups names [\#3829](https://github.com/netdata/netdata/issues/3829) +- New at netdata and a lot of alarms [\#3826](https://github.com/netdata/netdata/issues/3826) +- \[REQUEST\] Add Fleep/webhook notifications [\#3792](https://github.com/netdata/netdata/issues/3792) +- Enhancement Redis protocol\(Pika\) ? [\#3783](https://github.com/netdata/netdata/issues/3783) +- Plugin for Litespeed stats [\#3781](https://github.com/netdata/netdata/issues/3781) +- Do you have in plan to implement Megacli \(hardware RAID\) support metrics ? [\#3757](https://github.com/netdata/netdata/issues/3757) +- Add a Safari pinned tab icon [\#3743](https://github.com/netdata/netdata/issues/3743) +- Colors for BTRFS graphs are inconsistent [\#3719](https://github.com/netdata/netdata/issues/3719) +- \[Information\] Adding tutorial for Netdata in HTTPS for Plesk systems [\#3717](https://github.com/netdata/netdata/issues/3717) +- hddtemp module fails: received data doesn't have needed records [\#3683](https://github.com/netdata/netdata/issues/3683) +- "alarm-notify.sh test" produces error exit code on success [\#3667](https://github.com/netdata/netdata/issues/3667) +- init file is not installed on Amazon Linux 2018.03 [\#3650](https://github.com/netdata/netdata/issues/3650) +- Option to prevent netdata dashboard.js from downloading FontAwesome [\#3644](https://github.com/netdata/netdata/issues/3644) +- FYI: Homebrew formula \(package\) of netdata for macOS [\#3642](https://github.com/netdata/netdata/issues/3642) +- python.d nginx module -- stub status from https server block on localhost? 
[\#3628](https://github.com/netdata/netdata/issues/3628) +- mdadm mismatch\_cnt statistic/alarm [\#3622](https://github.com/netdata/netdata/issues/3622) +- Python.d postgres unhandled exception [\#3614](https://github.com/netdata/netdata/issues/3614) +- Support for RethinkDB stats [\#3422](https://github.com/netdata/netdata/issues/3422) +- Notifications to Microsoft Teams [\#3330](https://github.com/netdata/netdata/issues/3330) +- enable system alarms on freebsd [\#3267](https://github.com/netdata/netdata/issues/3267) +- web\_log: response time should support summary or histogram [\#3102](https://github.com/netdata/netdata/issues/3102) +- Alarm for big system load [\#3003](https://github.com/netdata/netdata/issues/3003) +- Illegal instruction - Debian Stretch i586 [\#2909](https://github.com/netdata/netdata/issues/2909) +- New documentation \[bounty\] [\#2638](https://github.com/netdata/netdata/issues/2638) +- web\_log: support squid logs [\#2235](https://github.com/netdata/netdata/issues/2235) +- Monitoring PHP APCu [\#2199](https://github.com/netdata/netdata/issues/2199) +- MySQLService \(or DatabaseService\) for python.d [\#1906](https://github.com/netdata/netdata/issues/1906) +- RocketChat notifications [\#1811](https://github.com/netdata/netdata/issues/1811) +- SCTP Information [\#1218](https://github.com/netdata/netdata/issues/1218) +- python.d enhancements [\#692](https://github.com/netdata/netdata/issues/692) +- feature request: pause all data processing if no one is watching the graphs [\#656](https://github.com/netdata/netdata/issues/656) +- netdata package maintainers [\#651](https://github.com/netdata/netdata/issues/651) + +**Merged pull requests:** + +- Changed swagger editor url to the correct one [\#4539](https://github.com/netdata/netdata/pull/4539) ([infeeeee](https://github.com/infeeeee)) +- fixed wrong annotations given to google charts [\#4535](https://github.com/netdata/netdata/pull/4535) ([ktsaou](https://github.com/ktsaou)) +- allow debugging memory per module [\#4524](https://github.com/netdata/netdata/pull/4524) ([ktsaou](https://github.com/ktsaou)) +- fixed vulnerabilities identified by red4sec.com [\#4521](https://github.com/netdata/netdata/pull/4521) ([ktsaou](https://github.com/ktsaou)) +- Do not enable unused per core interrupts by default [\#4519](https://github.com/netdata/netdata/pull/4519) ([ktsaou](https://github.com/ktsaou)) +- exclude web/gui/src from codacy checks [\#4515](https://github.com/netdata/netdata/pull/4515) ([paulfantom](https://github.com/paulfantom)) +- do not send duplicate chart names while streaming metrics [\#4508](https://github.com/netdata/netdata/pull/4508) ([ktsaou](https://github.com/ktsaou)) +- fix RPM build [\#4507](https://github.com/netdata/netdata/pull/4507) ([ktsaou](https://github.com/ktsaou)) +- Split the API formatters in modules [\#4504](https://github.com/netdata/netdata/pull/4504) ([ktsaou](https://github.com/ktsaou)) +- fixed rpm build; [\#4503](https://github.com/netdata/netdata/pull/4503) ([ktsaou](https://github.com/ktsaou)) +- Fix\(snmp\): fix parse oidname and sanitize dimension name [\#4498](https://github.com/netdata/netdata/pull/4498) ([Ehekatl](https://github.com/Ehekatl)) +- fix query min-max, again... 
[\#4495](https://github.com/netdata/netdata/pull/4495) ([ktsaou](https://github.com/ktsaou)) +- diskspace plugin should not stat\(\) excluded mountpoints [\#4494](https://github.com/netdata/netdata/pull/4494) ([ktsaou](https://github.com/ktsaou)) +- restored min-max calculation of RRDR [\#4489](https://github.com/netdata/netdata/pull/4489) ([ktsaou](https://github.com/ktsaou)) +- query engine documentation and stats [\#4483](https://github.com/netdata/netdata/pull/4483) ([ktsaou](https://github.com/ktsaou)) +- fix query sum [\#4482](https://github.com/netdata/netdata/pull/4482) ([ktsaou](https://github.com/ktsaou)) +- query code cleanup [\#4480](https://github.com/netdata/netdata/pull/4480) ([ktsaou](https://github.com/ktsaou)) +- Fix checking of grouping time [\#4478](https://github.com/netdata/netdata/pull/4478) ([vlvkobal](https://github.com/vlvkobal)) +- Disable python sudo modules by default [\#4477](https://github.com/netdata/netdata/pull/4477) ([ilyam8](https://github.com/ilyam8)) +- bug-fix: fixed aligned queries that returned no data [\#4472](https://github.com/netdata/netdata/pull/4472) ([ktsaou](https://github.com/ktsaou)) +- Add proxysql to python.d.plugin Makefile.am [\#4466](https://github.com/netdata/netdata/pull/4466) ([alibo](https://github.com/alibo)) +- updated tests for the new hierarchy [\#4464](https://github.com/netdata/netdata/pull/4464) ([ktsaou](https://github.com/ktsaou)) +- Remove duplicated entry and put modules in order in python.d.conf [\#4460](https://github.com/netdata/netdata/pull/4460) ([vladmovchan](https://github.com/vladmovchan)) +- fix permissions for config files in a container [\#4454](https://github.com/netdata/netdata/pull/4454) ([paulfantom](https://github.com/paulfantom)) +- mongodb fix [\#4449](https://github.com/netdata/netdata/pull/4449) ([ilyam8](https://github.com/ilyam8)) +- icecast bugfix [\#4448](https://github.com/netdata/netdata/pull/4448) ([ilyam8](https://github.com/ilyam8)) +- invalidate incorrect rpm spec changelog [\#4445](https://github.com/netdata/netdata/pull/4445) ([paulfantom](https://github.com/paulfantom)) +- modularize the query api [\#4443](https://github.com/netdata/netdata/pull/4443) ([ktsaou](https://github.com/ktsaou)) +- Indicate FreeIPMI support for FreeBSD [\#4440](https://github.com/netdata/netdata/pull/4440) ([openspork](https://github.com/openspork)) +- remove unused variables [\#4437](https://github.com/netdata/netdata/pull/4437) ([paulfantom](https://github.com/paulfantom)) +- Feat: detect NIC speed and alarm on each device for net traffic overflow [\#4430](https://github.com/netdata/netdata/pull/4430) ([Ehekatl](https://github.com/Ehekatl)) +- fix streaming bug [\#4425](https://github.com/netdata/netdata/pull/4425) ([ktsaou](https://github.com/ktsaou)) +- fix systemd detection; [\#4423](https://github.com/netdata/netdata/pull/4423) ([ktsaou](https://github.com/ktsaou)) +- moved stream.conf initialization after log files have been open [\#4422](https://github.com/netdata/netdata/pull/4422) ([ktsaou](https://github.com/ktsaou)) +- Fix cmake build on macos [\#4420](https://github.com/netdata/netdata/pull/4420) ([Ehekatl](https://github.com/Ehekatl)) +- Evaluate $used\_ram\_to\_ignore on FreeBSD [\#4419](https://github.com/netdata/netdata/pull/4419) ([openspork](https://github.com/openspork)) +- fix node.d.plugin; [\#4413](https://github.com/netdata/netdata/pull/4413) ([ktsaou](https://github.com/ktsaou)) +- fix netdata.spec for new directory structure [\#4410](https://github.com/netdata/netdata/pull/4410) 
([ktsaou](https://github.com/ktsaou)) +- Added uwsgi plugin [\#4404](https://github.com/netdata/netdata/pull/4404) ([robbert-ef](https://github.com/robbert-ef)) +- Add sendmail into the list of mail servers [\#4402](https://github.com/netdata/netdata/pull/4402) ([vladmovchan](https://github.com/vladmovchan)) +- Fix make dist [\#4401](https://github.com/netdata/netdata/pull/4401) ([ktsaou](https://github.com/ktsaou)) +- fix compilation on FreeBSD; [\#4398](https://github.com/netdata/netdata/pull/4398) ([ktsaou](https://github.com/ktsaou)) +- modularized all source code [\#4391](https://github.com/netdata/netdata/pull/4391) ([ktsaou](https://github.com/ktsaou)) +- Account "Laundry" pages on FreeBSD [\#4390](https://github.com/netdata/netdata/pull/4390) ([vladmovchan](https://github.com/vladmovchan)) +- normalized plugin names on all plugins; [\#4387](https://github.com/netdata/netdata/pull/4387) ([ktsaou](https://github.com/ktsaou)) +- updated swagger info for HTTPS [\#4386](https://github.com/netdata/netdata/pull/4386) ([ktsaou](https://github.com/ktsaou)) +- make future code PEP8 compliant [\#4382](https://github.com/netdata/netdata/pull/4382) ([paulfantom](https://github.com/paulfantom)) +- modularize C source code [\#4372](https://github.com/netdata/netdata/pull/4372) ([ktsaou](https://github.com/ktsaou)) +- fix docker builds [\#4367](https://github.com/netdata/netdata/pull/4367) ([paulfantom](https://github.com/paulfantom)) +- add option to run netdata in the background [\#4364](https://github.com/netdata/netdata/pull/4364) ([pohzipohzi](https://github.com/pohzipohzi)) +- support filtering of charts during streaming; [\#4361](https://github.com/netdata/netdata/pull/4361) ([ktsaou](https://github.com/ktsaou)) +- edit-config should use . instead of source with /bin/sh [\#4360](https://github.com/netdata/netdata/pull/4360) ([ktsaou](https://github.com/ktsaou)) +- send pipes URL encoded [\#4358](https://github.com/netdata/netdata/pull/4358) ([ktsaou](https://github.com/ktsaou)) +- updated configs.signatures [\#4356](https://github.com/netdata/netdata/pull/4356) ([ktsaou](https://github.com/ktsaou)) +- Fix firehol image tagging [\#4355](https://github.com/netdata/netdata/pull/4355) ([paulfantom](https://github.com/paulfantom)) +- Fix apache ipv6 configuration [\#4349](https://github.com/netdata/netdata/pull/4349) ([candrews](https://github.com/candrews)) +- Fix phpfpm ipv6 configuration [\#4348](https://github.com/netdata/netdata/pull/4348) ([candrews](https://github.com/candrews)) +- Add query types to mysql plugin [\#4347](https://github.com/netdata/netdata/pull/4347) ([roedie](https://github.com/roedie)) +- ExecutableService: return \[\] instead of None if no data. [\#4346](https://github.com/netdata/netdata/pull/4346) ([Ferroin](https://github.com/Ferroin)) +- Fix the last few PEP 8 compliance issues. 
[\#4345](https://github.com/netdata/netdata/pull/4345) ([Ferroin](https://github.com/Ferroin)) +- log flood should not be disabled; [\#4344](https://github.com/netdata/netdata/pull/4344) ([ktsaou](https://github.com/ktsaou)) +- better daemon errors about files; [\#4342](https://github.com/netdata/netdata/pull/4342) ([ktsaou](https://github.com/ktsaou)) +- added edit-config [\#4338](https://github.com/netdata/netdata/pull/4338) ([ktsaou](https://github.com/ktsaou)) +- Fix BIND outgoing stats in a multiview environment [\#4337](https://github.com/netdata/netdata/pull/4337) ([vobruba-martin](https://github.com/vobruba-martin)) +- fixes coverity identified issues [\#4333](https://github.com/netdata/netdata/pull/4333) ([ktsaou](https://github.com/ktsaou)) +- Use newer docker in CI build [\#4332](https://github.com/netdata/netdata/pull/4332) ([paulfantom](https://github.com/paulfantom)) +- fix docker image [\#4330](https://github.com/netdata/netdata/pull/4330) ([paulfantom](https://github.com/paulfantom)) +- Auto-releaser [\#4328](https://github.com/netdata/netdata/pull/4328) ([paulfantom](https://github.com/paulfantom)) +- fix spdx headers [\#4327](https://github.com/netdata/netdata/pull/4327) ([paulfantom](https://github.com/paulfantom)) +- updated LGTM URLs [\#4317](https://github.com/netdata/netdata/pull/4317) ([ktsaou](https://github.com/ktsaou)) +- more code owners [\#4316](https://github.com/netdata/netdata/pull/4316) ([paulfantom](https://github.com/paulfantom)) +- Use docker manifests [\#4315](https://github.com/netdata/netdata/pull/4315) ([paulfantom](https://github.com/paulfantom)) +- install some libs for coverity [\#4314](https://github.com/netdata/netdata/pull/4314) ([paulfantom](https://github.com/paulfantom)) +- cleanup FIXME tags [\#4309](https://github.com/netdata/netdata/pull/4309) ([ktsaou](https://github.com/ktsaou)) +- force symlink of netdata-updater.sh [\#4307](https://github.com/netdata/netdata/pull/4307) ([mrdrogdrog](https://github.com/mrdrogdrog)) +- Build OSX earlier than linux [\#4305](https://github.com/netdata/netdata/pull/4305) ([paulfantom](https://github.com/paulfantom)) +- Fix coverity-scan.sh [\#4304](https://github.com/netdata/netdata/pull/4304) ([paulfantom](https://github.com/paulfantom)) +- Python.d PEP 8 cleanup, modules S-Z [\#4302](https://github.com/netdata/netdata/pull/4302) ([Ferroin](https://github.com/Ferroin)) +- Python.d PEP 8 cleanup, modules P-R [\#4299](https://github.com/netdata/netdata/pull/4299) ([Ferroin](https://github.com/Ferroin)) +- Python.d/postgres.chart.py PEP 8 code cleanup [\#4298](https://github.com/netdata/netdata/pull/4298) ([Ferroin](https://github.com/Ferroin)) +- Python.d PEP 8 cleanup, modules N-O [\#4297](https://github.com/netdata/netdata/pull/4297) ([Ferroin](https://github.com/Ferroin)) +- reproducible build system [\#4294](https://github.com/netdata/netdata/pull/4294) ([paulfantom](https://github.com/paulfantom)) +- update variable after install [\#4292](https://github.com/netdata/netdata/pull/4292) ([paulfantom](https://github.com/paulfantom)) +- give credit where credit is due [\#4291](https://github.com/netdata/netdata/pull/4291) ([paulfantom](https://github.com/paulfantom)) +- fix typo in coverity scan script [\#4290](https://github.com/netdata/netdata/pull/4290) ([paulfantom](https://github.com/paulfantom)) +- Python.d PEP 8 cleanup, modules M [\#4289](https://github.com/netdata/netdata/pull/4289) ([Ferroin](https://github.com/Ferroin)) +- Python.d PEP 8 cleanup, modules I-L 
[\#4288](https://github.com/netdata/netdata/pull/4288) ([Ferroin](https://github.com/Ferroin)) +- Python.d PEP 8 cleanup, modules D-H [\#4287](https://github.com/netdata/netdata/pull/4287) ([Ferroin](https://github.com/Ferroin)) +- Python.d PEP 8 cleanup, modules A-C [\#4286](https://github.com/netdata/netdata/pull/4286) ([Ferroin](https://github.com/Ferroin)) +- Fix typo in documentation [\#4284](https://github.com/netdata/netdata/pull/4284) ([eduherminio](https://github.com/eduherminio)) +- stock configs in /usr/lib/netdata [\#4283](https://github.com/netdata/netdata/pull/4283) ([ktsaou](https://github.com/ktsaou)) +- use flake8 instead of pylint [\#4282](https://github.com/netdata/netdata/pull/4282) ([paulfantom](https://github.com/paulfantom)) +- tcp syn and accept queue charts and alarms [\#4281](https://github.com/netdata/netdata/pull/4281) ([ktsaou](https://github.com/ktsaou)) +- add code of conduct [\#4280](https://github.com/netdata/netdata/pull/4280) ([paulfantom](https://github.com/paulfantom)) +- megacli plugin: adapter regex update [\#4279](https://github.com/netdata/netdata/pull/4279) ([ilyam8](https://github.com/ilyam8)) +- Allow DOCKER\_HOST env variable to override default docker socket path [\#4277](https://github.com/netdata/netdata/pull/4277) ([xginn8](https://github.com/xginn8)) +- Add other common IoT services to apps\_groups.conf [\#4276](https://github.com/netdata/netdata/pull/4276) ([xginn8](https://github.com/xginn8)) +- fix python warnings identified by LGTM [\#4275](https://github.com/netdata/netdata/pull/4275) ([ilyam8](https://github.com/ilyam8)) +- nightly builds + coverity scan [\#4273](https://github.com/netdata/netdata/pull/4273) ([paulfantom](https://github.com/paulfantom)) +- better lgtm config [\#4272](https://github.com/netdata/netdata/pull/4272) ([paulfantom](https://github.com/paulfantom)) +- Fixup small python-logind typos [\#4271](https://github.com/netdata/netdata/pull/4271) ([xginn8](https://github.com/xginn8)) +- Fix several typos in documentation [\#4270](https://github.com/netdata/netdata/pull/4270) ([Calinou](https://github.com/Calinou)) +- \[WIP\] LGTM tag classification [\#4269](https://github.com/netdata/netdata/pull/4269) ([paulfantom](https://github.com/paulfantom)) +- create stale bot integration [\#4268](https://github.com/netdata/netdata/pull/4268) ([paulfantom](https://github.com/paulfantom)) +- apps.plugin fixes [\#4267](https://github.com/netdata/netdata/pull/4267) ([ktsaou](https://github.com/ktsaou)) +- finetune Code Owners entries [\#4264](https://github.com/netdata/netdata/pull/4264) ([paulfantom](https://github.com/paulfantom)) +- updated readme for netdata org [\#4262](https://github.com/netdata/netdata/pull/4262) ([ktsaou](https://github.com/ktsaou)) +- \[cleanup crusade\] Shellcheck [\#4261](https://github.com/netdata/netdata/pull/4261) ([paulfantom](https://github.com/paulfantom)) +- \[WIP\] release less artifacts [\#4260](https://github.com/netdata/netdata/pull/4260) ([paulfantom](https://github.com/paulfantom)) +- Make method in url service configurable [\#4257](https://github.com/netdata/netdata/pull/4257) ([ccremer](https://github.com/ccremer)) +- Fix typo in documentation [\#4255](https://github.com/netdata/netdata/pull/4255) ([olivierlambert](https://github.com/olivierlambert)) +- coverity should still use firehol/netdata until we find a solution [\#4253](https://github.com/netdata/netdata/pull/4253) ([ktsaou](https://github.com/ktsaou)) +- fix badges in README.md 
[\#4251](https://github.com/netdata/netdata/pull/4251) ([paulfantom](https://github.com/paulfantom)) +- replaced references to firehol github org with netdata github org [\#4249](https://github.com/netdata/netdata/pull/4249) ([ktsaou](https://github.com/ktsaou)) +- Travis and docker setup after migration [\#4247](https://github.com/netdata/netdata/pull/4247) ([paulfantom](https://github.com/paulfantom)) +- collect TcpExtTCPReqQFullDrop; [\#4246](https://github.com/netdata/netdata/pull/4246) ([ktsaou](https://github.com/ktsaou)) +- fixed typo in prometheus\_all\_hosts output [\#4245](https://github.com/netdata/netdata/pull/4245) ([ktsaou](https://github.com/ktsaou)) +- apps.plugin now checks fds for changes, with adaptive caching [\#4243](https://github.com/netdata/netdata/pull/4243) ([ktsaou](https://github.com/ktsaou)) +- Added -NoLog parameter for megacli calls [\#4242](https://github.com/netdata/netdata/pull/4242) ([vobruba-martin](https://github.com/vobruba-martin)) +- updated configs.signatures [\#4240](https://github.com/netdata/netdata/pull/4240) ([ktsaou](https://github.com/ktsaou)) +- command js is not node.js [\#4239](https://github.com/netdata/netdata/pull/4239) ([ktsaou](https://github.com/ktsaou)) +- Fix missing comma in couchdb module. [\#4238](https://github.com/netdata/netdata/pull/4238) ([Ferroin](https://github.com/Ferroin)) +- Fix LGTM complaints in monit module. [\#4237](https://github.com/netdata/netdata/pull/4237) ([Ferroin](https://github.com/Ferroin)) +- daemon cleanup [\#4231](https://github.com/netdata/netdata/pull/4231) ([ktsaou](https://github.com/ktsaou)) +- varnish plugin bugfix [\#4228](https://github.com/netdata/netdata/pull/4228) ([ilyam8](https://github.com/ilyam8)) +- CLA signing using cla-assistant.io [\#4226](https://github.com/netdata/netdata/pull/4226) ([ktsaou](https://github.com/ktsaou)) +- Disable IPFS Pin API [\#4224](https://github.com/netdata/netdata/pull/4224) ([jkpit](https://github.com/jkpit)) +- fixes identified by LGTM [\#4220](https://github.com/netdata/netdata/pull/4220) ([ktsaou](https://github.com/ktsaou)) +- workaround for LGTM false-positives [\#4218](https://github.com/netdata/netdata/pull/4218) ([ktsaou](https://github.com/ktsaou)) +- fixed issues identified by lgtm [\#4216](https://github.com/netdata/netdata/pull/4216) ([ktsaou](https://github.com/ktsaou)) +- fix netdata server URL detection in dashboard.js; [\#4215](https://github.com/netdata/netdata/pull/4215) ([ktsaou](https://github.com/ktsaou)) +- Create lgtm config [\#4213](https://github.com/netdata/netdata/pull/4213) ([paulfantom](https://github.com/paulfantom)) +- more LGTM minor fixes [\#4211](https://github.com/netdata/netdata/pull/4211) ([ktsaou](https://github.com/ktsaou)) +- updated configs.signatures [\#4210](https://github.com/netdata/netdata/pull/4210) ([ktsaou](https://github.com/ktsaou)) +- fixes identified by LGTM [\#4209](https://github.com/netdata/netdata/pull/4209) ([ktsaou](https://github.com/ktsaou)) +- allow empty values in config settings; [\#4208](https://github.com/netdata/netdata/pull/4208) ([ktsaou](https://github.com/ktsaou)) +- added UTC to server timezones list; [\#4207](https://github.com/netdata/netdata/pull/4207) ([ktsaou](https://github.com/ktsaou)) +- redis plugin bugfix [\#4205](https://github.com/netdata/netdata/pull/4205) ([ilyam8](https://github.com/ilyam8)) +- send host variables to prometheus [\#4200](https://github.com/netdata/netdata/pull/4200) ([ktsaou](https://github.com/ktsaou)) +- Update CONTRIBUTORS.md 
[\#4197](https://github.com/netdata/netdata/pull/4197) ([paulfantom](https://github.com/paulfantom)) +- nginx\_plus: use upstream server IP:port in dimension IDs, not the transient ID [\#4194](https://github.com/netdata/netdata/pull/4194) ([illes](https://github.com/illes)) +- time-duration badges should show "undefined" instead of "never" [\#4193](https://github.com/netdata/netdata/pull/4193) ([ktsaou](https://github.com/ktsaou)) +- Add docker plugin [\#4191](https://github.com/netdata/netdata/pull/4191) ([tuxity](https://github.com/tuxity)) +- Improve packaging checks [\#4188](https://github.com/netdata/netdata/pull/4188) ([philwhineray](https://github.com/philwhineray)) +- elasticsearch: handle json parse error in threads [\#4186](https://github.com/netdata/netdata/pull/4186) ([ilyam8](https://github.com/ilyam8)) +- pythond\_small\_fixes [\#4185](https://github.com/netdata/netdata/pull/4185) ([ilyam8](https://github.com/ilyam8)) +- \[cleanup crusade\] more linting of bash modules [\#4183](https://github.com/netdata/netdata/pull/4183) ([paulfantom](https://github.com/paulfantom)) +- create pid directory, if not present [\#4181](https://github.com/netdata/netdata/pull/4181) ([ktsaou](https://github.com/ktsaou)) +- fix for load alarms [\#4180](https://github.com/netdata/netdata/pull/4180) ([ktsaou](https://github.com/ktsaou)) +- updated configs.signatures [\#4179](https://github.com/netdata/netdata/pull/4179) ([ktsaou](https://github.com/ktsaou)) +- Add permission file check in ceph module [\#4177](https://github.com/netdata/netdata/pull/4177) ([lets00](https://github.com/lets00)) +- \[cleanup crusade\] disable linters on installer scripts [\#4176](https://github.com/netdata/netdata/pull/4176) ([paulfantom](https://github.com/paulfantom)) +- Add alarms for abnormally high load averages. 
[\#4175](https://github.com/netdata/netdata/pull/4175) ([Ferroin](https://github.com/Ferroin)) +- CI builds in containers [\#4174](https://github.com/netdata/netdata/pull/4174) ([paulfantom](https://github.com/paulfantom)) +- Fix lack of dot [\#4172](https://github.com/netdata/netdata/pull/4172) ([paulfantom](https://github.com/paulfantom)) +- remove condition from netdata.service [\#4170](https://github.com/netdata/netdata/pull/4170) ([ktsaou](https://github.com/ktsaou)) +- \[WIP\] fail2ban: ipv6 support added + module simplification [\#4168](https://github.com/netdata/netdata/pull/4168) ([ilyam8](https://github.com/ilyam8)) +- \[cleanup crusade\] linting shell scripts for docker, tests and python [\#4162](https://github.com/netdata/netdata/pull/4162) ([paulfantom](https://github.com/paulfantom)) +- \[cleanup crusade\] shellcheck in contrib [\#4160](https://github.com/netdata/netdata/pull/4160) ([paulfantom](https://github.com/paulfantom)) +- \[cleanup crusade\] Lint bash scripts on letter A [\#4159](https://github.com/netdata/netdata/pull/4159) ([paulfantom](https://github.com/paulfantom)) +- use pidfile to send HUP to netdata via logrotate; [\#4157](https://github.com/netdata/netdata/pull/4157) ([ktsaou](https://github.com/ktsaou)) +- python plugin monotonic fix [\#4156](https://github.com/netdata/netdata/pull/4156) ([ilyam8](https://github.com/ilyam8)) +- add variable system.cpu.processors for alarms; [\#4155](https://github.com/netdata/netdata/pull/4155) ([ktsaou](https://github.com/ktsaou)) +- netdata.service is now installed in /lib/systemd/system; [\#4151](https://github.com/netdata/netdata/pull/4151) ([ktsaou](https://github.com/ktsaou)) +- name veritas volume disk groups [\#4150](https://github.com/netdata/netdata/pull/4150) ([ktsaou](https://github.com/ktsaou)) +- do not get the address of FILE pointer; [\#4149](https://github.com/netdata/netdata/pull/4149) ([ktsaou](https://github.com/ktsaou)) +- Add some extra error logging to the spigotmc module. 
[\#4148](https://github.com/netdata/netdata/pull/4148) ([Ferroin](https://github.com/Ferroin)) +- /proc/net/snmp minimum line length for IcmpMsg is 2 words, not 3 [\#4147](https://github.com/netdata/netdata/pull/4147) ([ktsaou](https://github.com/ktsaou)) +- when running under systemd, keep the process scheduling parameters set [\#4143](https://github.com/netdata/netdata/pull/4143) ([ktsaou](https://github.com/ktsaou)) +- \[cleanup crusade\] travis build stages [\#4142](https://github.com/netdata/netdata/pull/4142) ([paulfantom](https://github.com/paulfantom)) +- Add ignore-status option to freeipmi\_plugin [\#4141](https://github.com/netdata/netdata/pull/4141) ([plasticrake](https://github.com/plasticrake)) +- \[cleanup crusade\] move profiling to tests directory [\#4140](https://github.com/netdata/netdata/pull/4140) ([paulfantom](https://github.com/paulfantom)) +- Less verbose bash and curl unpacking [\#4139](https://github.com/netdata/netdata/pull/4139) ([paulfantom](https://github.com/paulfantom)) +- \[project management\] add github CODEOWNERS [\#4137](https://github.com/netdata/netdata/pull/4137) ([paulfantom](https://github.com/paulfantom)) +- \[cleanup crusade\] cleanup licenses [\#4136](https://github.com/netdata/netdata/pull/4136) ([paulfantom](https://github.com/paulfantom)) +- Add ProxySQL python plugin [\#4112](https://github.com/netdata/netdata/pull/4112) ([alibo](https://github.com/alibo)) +- Optimize counting of recursive pins [\#4095](https://github.com/netdata/netdata/pull/4095) ([pjz](https://github.com/pjz)) +- \[nginx\_plus\] fix handling of non-contiguous peer IDs [\#4093](https://github.com/netdata/netdata/pull/4093) ([illes](https://github.com/illes)) +- web\_log Virtual host enhancement and http/https [\#4076](https://github.com/netdata/netdata/pull/4076) ([jgrossiord](https://github.com/jgrossiord)) +- push host tags for graphite; [\#3992](https://github.com/netdata/netdata/pull/3992) ([ktsaou](https://github.com/ktsaou)) +- rethinkdb python plugin [\#3955](https://github.com/netdata/netdata/pull/3955) ([ilyam8](https://github.com/ilyam8)) +- Add a python plugin for monitoring power supplies on Linux. [\#3799](https://github.com/netdata/netdata/pull/3799) ([Ferroin](https://github.com/Ferroin)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000..cb1e1ef48 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,575 @@ + +# SPDX-License-Identifier: GPL-3.0-or-later +# This file is only used for development (netdata in CLion) +# It can build netdata, but you are on your own... 
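+# A minimal usage sketch (illustrative only, not an officially supported
+# build path -- the autotools-based netdata-installer.sh flow remains the
+# supported install method; the build directory name below is arbitrary):
+#
+#     mkdir cmake-build && cd cmake-build
+#     cmake -DCMAKE_BUILD_TYPE=Debug ..
+#     cmake --build .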
+ +cmake_minimum_required(VERSION 3.0.2) +project(netdata C) + +find_package(Threads REQUIRED) +find_package(PkgConfig REQUIRED) + +# default is "Debug" +#set(CMAKE_BUILD_TYPE "Release") + +# set this to see the compilation commands +# set(CMAKE_VERBOSE_MAKEFILE 1) + + +# ----------------------------------------------------------------------------- +# Set compilation options according to build type + +IF("${CMAKE_BUILD_TYPE}" MATCHES "Debug") + message(STATUS "building for: debugging") + + ## unfortunately these produce errors + #include(CheckCXXCompilerFlag) + #CHECK_CXX_COMPILER_FLAG("-Wformat-signedness" CXX_FORMAT_SIGNEDNESS) + #CHECK_CXX_COMPILER_FLAG("-Werror=format-security" CXX_FORMAT_SECURITY) + #CHECK_CXX_COMPILER_FLAG("-fstack-protector-all" CXX_STACK_PROTECTOR) + set(CXX_FORMAT_SIGNEDNESS "-Wformat-signedness") + set(CXX_FORMAT_SECURITY "-Werror=format-security") + set(CXX_STACK_PROTECTOR "-fstack-protector-all") + set(CXX_FLAGS_DEBUG "-O0") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -ggdb -Wall -Wextra -DNETDATA_INTERNAL_CHECKS=1 -DNETDATA_VERIFY_LOCKS=1 ${CXX_FORMAT_SIGNEDNESS} ${CXX_FORMAT_SECURITY} ${CXX_STACK_PROTECTOR} ${CXX_FLAGS_DEBUG}") +ELSE() + message(STATUS "building for: release") + cmake_policy(SET CMP0069 "NEW") + include(CheckIPOSupported) + check_ipo_supported(RESULT ipo_supported OUTPUT error) + IF(${ipo_supported}) + message(STATUS "link time optimization: supported") + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) + ELSE() + message(STATUS "link time optimization: not supported") + ENDIF() +ENDIF() + + +# ----------------------------------------------------------------------------- +# O/S Detection + +# these are defined in common.h too +SET(LINUX False) +SET(FREEBSD False) +SET(MACOS False) + +# Detect the operating system +IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + SET(TARGET_OS_NAME "macos") + SET(TARGET_OS 3) + SET(MACOS True) +ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD") + SET(TARGET_OS_NAME "freebsd") + SET(TARGET_OS 2) + SET(FREEBSD True) +ELSE() + SET(TARGET_OS_NAME "linux") + SET(TARGET_OS 1) + SET(LINUX True) +ENDIF() + +# show the operating system on the console +message(STATUS "operating system: ${TARGET_OS_NAME} (TARGET_OS=${TARGET_OS})") + + +# ----------------------------------------------------------------------------- +# Detect libuuid + +pkg_check_modules(UUID REQUIRED uuid) +set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${UUID_CFLAGS_OTHER}) +set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${UUID_LIBRARIES}) +set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${UUID_INCLUDE_DIRS}) + +# ----------------------------------------------------------------------------- +# Detect libz + +pkg_check_modules(ZLIB REQUIRED zlib) +set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${ZLIB_CFLAGS_OTHER}) +set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${ZLIB_LIBRARIES}) +set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS}) + + +# ----------------------------------------------------------------------------- +# Detect libcap + +IF(LINUX) + pkg_check_modules(CAP QUIET libcap) + # later we use: + # ${CAP_LIBRARIES} + # ${CAP_CFLAGS_OTHER} + # ${CAP_INCLUDE_DIRS} +ENDIF(LINUX) + + +# ----------------------------------------------------------------------------- +# Detect libipmimonitoring + +IF(LINUX) + pkg_check_modules(IPMI libipmimonitoring) + # later we use: + # ${IPMI_LIBRARIES} + # ${IPMI_CFLAGS_OTHER} + # ${IPMI_INCLUDE_DIRS} +ENDIF(LINUX) + + +# 
----------------------------------------------------------------------------- +# Detect libmnl +IF(LINUX) + pkg_check_modules(MNL libmnl) + # later we use: + # ${MNL_LIBRARIES} + # ${MNL_CFLAGS_OTHER} + # ${MNL_INCLUDE_DIRS} +ENDIF(LINUX) + + +# ----------------------------------------------------------------------------- +# Detect libnetfilter_acct + +pkg_check_modules(NFACCT libnetfilter_acct) +# later we use: +# ${NFACCT_LIBRARIES} +# ${NFACCT_CFLAGS_OTHER} +# ${NFACCT_INCLUDE_DIRS} + + +# ----------------------------------------------------------------------------- +# Detect MacOS IOKit/Foundation framework + +IF(MACOS) + find_library(IOKIT IOKit) + find_library(FOUNDATION Foundation) + # later we use: + # ${FOUNDATION} + # ${IOKIT} +ENDIF(MACOS) + + +# ----------------------------------------------------------------------------- +# netdata files + +set(LIBNETDATA_FILES + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h + libnetdata/config/appconfig.c + libnetdata/config/appconfig.h + libnetdata/avl/avl.c + libnetdata/avl/avl.h + libnetdata/buffer/buffer.c + libnetdata/buffer/buffer.h + libnetdata/clocks/clocks.c + libnetdata/clocks/clocks.h + libnetdata/dictionary/dictionary.c + libnetdata/dictionary/dictionary.h + libnetdata/eval/eval.c + libnetdata/eval/eval.h + libnetdata/inlined.h + libnetdata/libnetdata.c + libnetdata/libnetdata.h + libnetdata/locks/locks.c + libnetdata/locks/locks.h + libnetdata/log/log.c + libnetdata/log/log.h + libnetdata/os.c + libnetdata/os.h + libnetdata/popen/popen.c + libnetdata/popen/popen.h + libnetdata/procfile/procfile.c + libnetdata/procfile/procfile.h + libnetdata/simple_pattern/simple_pattern.c + libnetdata/simple_pattern/simple_pattern.h + libnetdata/socket/socket.c + libnetdata/socket/socket.h + libnetdata/statistical/statistical.c + libnetdata/statistical/statistical.h + libnetdata/storage_number/storage_number.c + libnetdata/storage_number/storage_number.h + libnetdata/threads/threads.c + libnetdata/threads/threads.h + libnetdata/url/url.c + libnetdata/url/url.h + ) + +add_library(libnetdata OBJECT ${LIBNETDATA_FILES}) + +set(APPS_PLUGIN_FILES + collectors/apps.plugin/apps_plugin.c + ) + +set(CHECKS_PLUGIN_FILES + collectors/checks.plugin/plugin_checks.c + collectors/checks.plugin/plugin_checks.h + ) + +set(FREEBSD_PLUGIN_FILES + collectors/freebsd.plugin/plugin_freebsd.c + collectors/freebsd.plugin/plugin_freebsd.h + collectors/freebsd.plugin/freebsd_sysctl.c + collectors/freebsd.plugin/freebsd_getmntinfo.c + collectors/freebsd.plugin/freebsd_getifaddrs.c + collectors/freebsd.plugin/freebsd_devstat.c + collectors/freebsd.plugin/freebsd_kstat_zfs.c + collectors/freebsd.plugin/freebsd_ipfw.c + collectors/proc.plugin/zfs_common.c + collectors/proc.plugin/zfs_common.h + ) + +set(HEALTH_PLUGIN_FILES + health/health.c + health/health.h + health/health_config.c + health/health_json.c + health/health_log.c) + +set(IDLEJITTER_PLUGIN_FILES + collectors/idlejitter.plugin/plugin_idlejitter.c + collectors/idlejitter.plugin/plugin_idlejitter.h + ) + +set(CGROUPS_PLUGIN_FILES + collectors/cgroups.plugin/sys_fs_cgroup.c + collectors/cgroups.plugin/sys_fs_cgroup.h + ) + +set(CGROUP_NETWORK_FILES + collectors/cgroups.plugin/cgroup-network.c + ) + +set(DISKSPACE_PLUGIN_FILES + collectors/diskspace.plugin/plugin_diskspace.h + collectors/diskspace.plugin/plugin_diskspace.c + ) + +set(FREEIPMI_PLUGIN_FILES + collectors/freeipmi.plugin/freeipmi_plugin.c + ) + +set(NFACCT_PLUGIN_FILES + 
collectors/nfacct.plugin/plugin_nfacct.c + collectors/nfacct.plugin/plugin_nfacct.h + ) + +set(PROC_PLUGIN_FILES + collectors/proc.plugin/ipc.c + collectors/proc.plugin/plugin_proc.c + collectors/proc.plugin/plugin_proc.h + collectors/proc.plugin/proc_diskstats.c + collectors/proc.plugin/proc_mdstat.c + collectors/proc.plugin/proc_interrupts.c + collectors/proc.plugin/proc_softirqs.c + collectors/proc.plugin/proc_loadavg.c + collectors/proc.plugin/proc_meminfo.c + collectors/proc.plugin/proc_net_dev.c + collectors/proc.plugin/proc_net_ip_vs_stats.c + collectors/proc.plugin/proc_net_netstat.c + collectors/proc.plugin/proc_net_rpc_nfs.c + collectors/proc.plugin/proc_net_rpc_nfsd.c + collectors/proc.plugin/proc_net_snmp.c + collectors/proc.plugin/proc_net_snmp6.c + collectors/proc.plugin/proc_net_sctp_snmp.c + collectors/proc.plugin/proc_net_sockstat.c + collectors/proc.plugin/proc_net_sockstat6.c + collectors/proc.plugin/proc_net_softnet_stat.c + collectors/proc.plugin/proc_net_stat_conntrack.c + collectors/proc.plugin/proc_net_stat_synproxy.c + collectors/proc.plugin/proc_self_mountinfo.c + collectors/proc.plugin/proc_self_mountinfo.h + collectors/proc.plugin/zfs_common.c + collectors/proc.plugin/zfs_common.h + collectors/proc.plugin/proc_spl_kstat_zfs.c + collectors/proc.plugin/proc_stat.c + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c + collectors/proc.plugin/proc_vmstat.c + collectors/proc.plugin/proc_uptime.c + collectors/proc.plugin/sys_kernel_mm_ksm.c + collectors/proc.plugin/sys_devices_system_edac_mc.c + collectors/proc.plugin/sys_devices_system_node.c + collectors/proc.plugin/sys_fs_btrfs.c + collectors/proc.plugin/sys_class_power_supply.c + ) + +set(TC_PLUGIN_FILES + collectors/tc.plugin/plugin_tc.c + collectors/tc.plugin/plugin_tc.h + ) + +set(MACOS_PLUGIN_FILES + collectors/macos.plugin/plugin_macos.c + collectors/macos.plugin/plugin_macos.h + collectors/macos.plugin/macos_sysctl.c + collectors/macos.plugin/macos_mach_smi.c + collectors/macos.plugin/macos_fw.c + ) + +set(PLUGINSD_PLUGIN_FILES + collectors/plugins.d/plugins_d.c + collectors/plugins.d/plugins_d.h + ) + +set(REGISTRY_PLUGIN_FILES + registry/registry.c + registry/registry.h + registry/registry_db.c + registry/registry_init.c + registry/registry_internals.c + registry/registry_internals.h + registry/registry_log.c + registry/registry_machine.c + registry/registry_machine.h + registry/registry_person.c + registry/registry_person.h + registry/registry_url.c + registry/registry_url.h + ) + +set(STATSD_PLUGIN_FILES + collectors/statsd.plugin/statsd.c + collectors/statsd.plugin/statsd.h + ) + +set(RRD_PLUGIN_FILES + database/rrdcalc.c + database/rrdcalc.h + database/rrdcalctemplate.c + database/rrdcalctemplate.h + database/rrddim.c + database/rrddimvar.c + database/rrddimvar.h + database/rrdfamily.c + database/rrdhost.c + database/rrd.c + database/rrd.h + database/rrdset.c + database/rrdsetvar.c + database/rrdsetvar.h + database/rrdvar.c + database/rrdvar.h + ) + +set(WEB_PLUGIN_FILES + web/server/web_client.c + web/server/web_client.h + web/server/web_server.c + web/server/web_server.h + web/server/static/static-threaded.c + web/server/static/static-threaded.h + web/server/web_client_cache.c + web/server/web_client_cache.h + ) + +set(API_PLUGIN_FILES + web/api/web_api_v1.c + web/api/web_api_v1.h + web/api/badges/web_buffer_svg.c + web/api/badges/web_buffer_svg.h + web/api/exporters/allmetrics.c + web/api/exporters/allmetrics.h + web/api/exporters/shell/allmetrics_shell.c + 
web/api/exporters/shell/allmetrics_shell.h + web/api/queries/rrdr.c + web/api/queries/rrdr.h + web/api/queries/query.c + web/api/queries/query.h + web/api/queries/average/average.c + web/api/queries/average/average.h + web/api/queries/incremental_sum/incremental_sum.c + web/api/queries/incremental_sum/incremental_sum.h + web/api/queries/max/max.c + web/api/queries/max/max.h + web/api/queries/min/min.c + web/api/queries/min/min.h + web/api/queries/sum/sum.c + web/api/queries/sum/sum.h + web/api/queries/median/median.c + web/api/queries/median/median.h + web/api/queries/stddev/stddev.c + web/api/queries/stddev/stddev.h + web/api/queries/ses/ses.c + web/api/queries/ses/ses.h + web/api/queries/des/des.c + web/api/queries/des/des.h + web/api/formatters/rrd2json.c + web/api/formatters/rrd2json.h + web/api/formatters/csv/csv.c + web/api/formatters/csv/csv.h + web/api/formatters/json/json.c + web/api/formatters/json/json.h + web/api/formatters/ssv/ssv.c + web/api/formatters/ssv/ssv.h + web/api/formatters/value/value.c + web/api/formatters/value/value.h + web/api/formatters/json_wrapper.c + web/api/formatters/json_wrapper.h + web/api/formatters/charts2json.c + web/api/formatters/charts2json.h + web/api/formatters/rrdset2json.c + web/api/formatters/rrdset2json.h + web/api/health/health_cmdapi.c + ) + +set(STREAMING_PLUGIN_FILES + streaming/rrdpush.c + streaming/rrdpush.h + ) + +set(BACKENDS_PLUGIN_FILES + backends/backends.c + backends/backends.h + backends/graphite/graphite.c + backends/graphite/graphite.h + backends/json/json.c + backends/json/json.h + backends/opentsdb/opentsdb.c + backends/opentsdb/opentsdb.h + backends/prometheus/backend_prometheus.c + backends/prometheus/backend_prometheus.h + ) + +set(DAEMON_FILES + daemon/common.c + daemon/common.h + daemon/daemon.c + daemon/daemon.h + daemon/global_statistics.c + daemon/global_statistics.h + daemon/main.c + daemon/main.h + daemon/signals.c + daemon/signals.h + daemon/unit_test.c + daemon/unit_test.h + ) + +set(NETDATA_FILES + collectors/all.h + ${DAEMON_FILES} + ${API_PLUGIN_FILES} + ${BACKENDS_PLUGIN_FILES} + ${CHECKS_PLUGIN_FILES} + ${HEALTH_PLUGIN_FILES} + ${IDLEJITTER_PLUGIN_FILES} + ${PLUGINSD_PLUGIN_FILES} + ${RRD_PLUGIN_FILES} + ${REGISTRY_PLUGIN_FILES} + ${STATSD_PLUGIN_FILES} + ${STREAMING_PLUGIN_FILES} + ${WEB_PLUGIN_FILES} + ) + +IF(LINUX AND MNL_LIBRARIES AND NFACCT_LIBRARIES) + message(STATUS "nfacct.plugin: enabled (will work only if netdata runs as root)") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DINTERNAL_PLUGIN_NFACCT=1") +ELSE() + message(STATUS "nfacct.plugin: disabled (requires libmnl and libnetfilter_acct)") +ENDIF() + +include_directories(AFTER .) 
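The nfacct conditional above only sets `INTERNAL_PLUGIN_NFACCT=1` when the pkg-config checks earlier in the file found both libraries. A quick way to predict the outcome on a given machine (a sketch, assuming pkg-config and a POSIX shell):

```sh
# mirrors the CMake test: the internal nfacct plugin needs libmnl and libnetfilter_acct
if pkg-config --exists libmnl libnetfilter_acct; then
    echo "nfacct.plugin: would be enabled"
else
    echo "nfacct.plugin: would be disabled"
fi
```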
+ +add_definitions( + -DHAVE_CONFIG_H + -DTARGET_OS=${TARGET_OS} + -DCACHE_DIR="/var/cache/netdata" + -DCONFIG_DIR="/etc/netdata" + -DLIBCONFIG_DIR="/usr/lib/netdata/conf.d" + -DLOG_DIR="/var/log/netdata" + -DPLUGINS_DIR="/usr/libexec/netdata" + -DWEB_DIR="/usr/share/netdata/web" + -DVARLIB_DIR="/var/lib/netdata" +) + +# ----------------------------------------------------------------------------- +# netdata + +set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT}) + +IF(LINUX) + add_executable(netdata config.h ${NETDATA_FILES} + ${CGROUPS_PLUGIN_FILES} + ${DISKSPACE_PLUGIN_FILES} + ${NFACCT_PLUGIN_FILES} + ${PROC_PLUGIN_FILES} + ${TC_PLUGIN_FILES} + ) + target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES} + ${MNL_LIBRARIES} + ${NFACCT_LIBRARIES} + ) + target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} + ${MNL_INCLUDE_DIRS} + ${NFACCT_INCLUDE_DIRS} + ) + target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS} + ${MNL_CFLAGS_OTHER} + ${NFACCT_CFLAGS_OTHER} + ) + + SET(ENABLE_PLUGIN_CGROUP_NETWORK True) + SET(ENABLE_PLUGIN_APPS True) + +ELSEIF(FREEBSD) + add_executable(netdata config.h ${NETDATA_FILES} ${FREEBSD_PLUGIN_FILES}) + target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES}) + target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS}) + target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS}) + SET(ENABLE_PLUGIN_CGROUP_NETWORK False) + SET(ENABLE_PLUGIN_APPS True) + +ELSEIF(MACOS) + add_executable(netdata config.h ${NETDATA_FILES} ${MACOS_PLUGIN_FILES}) + target_link_libraries (netdata libnetdata ${NETDATA_COMMON_LIBRARIES} ${IOKIT} ${FOUNDATION}) + target_include_directories(netdata PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS}) + target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS}) + SET(ENABLE_PLUGIN_CGROUP_NETWORK False) + SET(ENABLE_PLUGIN_APPS False) + +ENDIF() + +IF(IPMI_LIBRARIES) + SET(ENABLE_PLUGIN_FREEIPMI True) +ELSE() + SET(ENABLE_PLUGIN_FREEIPMI False) +ENDIF() + + +# ----------------------------------------------------------------------------- +# apps.plugin + +IF(ENABLE_PLUGIN_APPS) + message(STATUS "apps.plugin: enabled") + add_executable(apps.plugin config.h ${APPS_PLUGIN_FILES}) + target_link_libraries (apps.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${CAP_LIBRARIES}) + target_include_directories(apps.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${CAP_INCLUDE_DIRS}) + target_compile_options(apps.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${CAP_CFLAGS_OTHER}) +ELSE() + message(STATUS "apps.plugin: disabled") +ENDIF() + + +# ----------------------------------------------------------------------------- +# freeipmi.plugin + +IF(ENABLE_PLUGIN_FREEIPMI) + message(STATUS "freeipmi.plugin: enabled") + add_executable(freeipmi.plugin config.h ${FREEIPMI_PLUGIN_FILES}) + target_link_libraries (freeipmi.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${IPMI_LIBRARIES}) + target_include_directories(freeipmi.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${IPMI_INCLUDE_DIRS}) + target_compile_options(freeipmi.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${IPMI_CFLAGS_OTHER}) +ELSE() + message(STATUS "freeipmi.plugin: disabled (depends on libipmimonitoring)") +ENDIF() + + +# ----------------------------------------------------------------------------- +# cgroup-network + +IF(ENABLE_PLUGIN_CGROUP_NETWORK) + message(STATUS "cgroup-network: enabled") + add_executable(cgroup-network config.h ${CGROUP_NETWORK_FILES}) + target_link_libraries (cgroup-network libnetdata 
${NETDATA_COMMON_LIBRARIES}) + target_include_directories(cgroup-network PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS}) + target_compile_options(cgroup-network PUBLIC ${NETDATA_COMMON_CFLAGS}) +ELSE() + message(STATUS "cgroup-network: disabled (requires Linux)") +ENDIF() diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 646a7d481..13424ce25 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -72,3 +72,5 @@ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.ht [homepage]: https://www.contributor-covenant.org + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCODE_OF_CONDUCT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa0656526..ec2ecf1fb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,8 @@ Thank you for considering contributing to Netdata. We love to receive contributions. Maintaining a platform for monitoring everything imaginable requires a broad understanding of a plethora of technologies, systems and applications. We rely on community contributions and user feedback to continue providing the best monitoring solution out there. -There are many ways to contribute, with varying requirements of skills: +There are many ways to contribute, with varying requirements of skills, explained in detail in the following sections. +Specific GitHub issues we need help with can be seen [here](https://github.com/netdata/netdata/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22help+wanted%22). Some of them are also labeled as "good first issue". ## All NetData Users @@ -20,9 +21,33 @@ Community growth allows the project to attract new talent willing to contribute. Is there anything that bothers you about netdata? Did you experience an issue while installing it or using it? Would you like to see it evolve to your needs? Let us know. [Open a github issue](https://github.com/netdata/netdata/issues) to discuss it. Feedback is very important for open-source projects. We can't commit to doing everything, but your feedback influences our road-map significantly. **We rely on your feedback to make Netdata better**. -#### Help the developers understand what they have to do +### Sponsor a part of Netdata -NetData is all about simplicity and meaningful presentation. It's impossible for a handful of people to know which metrics really matter when monitoring a particular software or hardware component you are interested in. Be specific about what should be collected, how the information should be presented in the dashboard and which alarms make sense in most situations. +Netdata is a complex system, with many integrations for the various collectors, backends and notification endpoints. As a result, we rely on help from "sponsors", a concept similar to "power users" or "product owners". To become a sponsor, just let us know in any GitHub issue and we will record your GitHub username in a "CONTRIBUTORS.md" in the appropriate directory. + +#### Sponsor a collector + +Netdata is all about simplicity and meaningful presentation. A "sponsor" for a collector does the following: + - Assists the devs with feedback on the charts. + - Specifies the alarms that would make sense for each metric. + - When the implementation passes QA, tests the implementation in production. + - Uses the charts and alarms in his/her day to day work and provides additional feedback.
+ - Requests additional improvements as things change (e.g. new versions of an API are available). + +#### Sponsor a backend + +We already support various [backends](backends) and we intend to support more. A "sponsor" for a backend: +- Suggests ways in which the information in Netdata could best be exposed to the particular backend, to facilitate meaningful presentation. +- When the implementation passes QA, tests the implementation in production. +- Uses the backend in his/her day to day work and provides additional feedback, after the backend is delivered. +- Requests additional improvements as things change (e.g. new versions of the backend API are available). + +#### Sponsor a notification method + +Netdata delivers alarms via various [notification methods](health/notifications). A "sponsor" for a notification method: +- Points the devs to the documentation for the API and identifies any unusual features of interest (e.g. the ability in Slack to send a notification either to a channel or to a user). +- Uses the notification method in production and provides feedback. +- Requests additional improvements as things change (e.g. new versions of the API are available). ## Experienced Users @@ -32,12 +57,13 @@ As the project grows, an increasing share of our time is spent on supporting thi ### Improve documentation -Most of our documentation is in markdown (.md) files inside the netdata GitHub project. What remains in our Wiki will soon be moved in there as well. Don't be afraid to edit any of these documents and submit a GitHub Pull Request with your corrections/additions. +All of our documentation is in markdown (.md) files inside the netdata GitHub project. All of our [HTML documentation](https://docs.netdata.cloud) is generated from these files. At the top right of each documentation page you will see a pencil that leads you directly to the markdown file that was used to generate it. Don't be afraid to click it and edit any of these documents and submit a GitHub Pull Request with your corrections/additions. +We also need help to [document each chart in the default dashboard](https://github.com/netdata/netdata/issues/279). ## Developers -We expect most contributions to be for new data collection plugins. You can read about how external plugins work [here](collectors/plugins.d/). Additional instructions are available for [Node.js plugins](collectors/node.d.plugin) and [Python plugis](collectors/python.d.plugin). +We expect most contributions to be for new data collection plugins. You can read about how external plugins work [here](collectors/plugins.d/). Additional instructions are available for [Node.js plugins](collectors/node.d.plugin) and [Python plugins](collectors/python.d.plugin). Of course we appreciate contributions for any other part of the NetData agent, including the [daemon](daemon), [backends for long term archiving](backends/), innovative ways of using the [REST API](web/api) to create cool [Custom Dashboards](web/gui/custom/) or to include NetData charts in other applications, similarly to what can be done with [Confluence](web/gui/confluence/). @@ -90,3 +116,23 @@ Your contributions should be bundled with related documentation to help users un When you contribute code to Netdata, you are automatically accepting that you will be responsible for maintaining that code in the future. So, if users need help, or report bugs, we will invite you to the related github issues to help them or fix the issues or bugs of your contributions.
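Picking up the REST API mention above: querying a running agent is a one-liner, which is what makes custom dashboards easy to prototype. A minimal sketch (assumptions: a local agent on the default port 19999; the chart name is illustrative):

```sh
# fetch the last 60 seconds of a chart as JSON from a running agent
curl -s 'http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&format=json'
```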
+### Your first pull request + +There are several guides for pull requests, such as the following: +- https://thenewstack.io/getting-legit-with-git-and-github-your-first-pull-request/ +- https://github.com/firstcontributions/first-contributions#first-contributions + +However, it's not always that simple. Our [PR approval process](#pr-approval-process) and the several merges we do every day may cause your fork to get behind the Netdata master. If you worked on something that has changed in the meantime, you will be required to do a git rebase, to bring your fork to the correct state. A very easy-to-follow guide on how to do it without learning all the intricacies of GitHub can be found [here](https://medium.com/@ruthmpardee/git-fork-workflow-using-rebase-587a144be470). + +One thing you will need to do only for your first pull request in Netdata is to accept the CLA. Until you do, the automated check for the CLA acceptance will show as failed. + +### PR approval process + +Each PR automatically [requires a review](https://help.github.com/articles/about-required-reviews-for-pull-requests/) from the code owners specified in `.github/CODEOWNERS`. Depending on the files contained in your PR, several people may need to approve it. + +We also have a series of automated checks running, such as linters to check code quality and QA tests. If you get an error or warning in any of those checks, you will need to click on the link included in the check to identify the root cause, so you can fix it. + +One special type of automated check is the "WIP" check. You may add "[WIP]" to the title of the PR, to tell us that the particular request is "Work In Progress" and should not be merged. You're still not done with it; you created it to get some feedback. When you're ready to get the final approvals and get it merged, just remove the "[WIP]" string from the title of your PR and the "WIP" check will pass. + + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTING&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 46ae396f3..ada8b0bbe 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -121,3 +121,6 @@ username|name|email (optional) @pohzipohzi|Poh Zi How @vladmovchan|Vladyslav Movchan|vladislav.movchan@gmail.com @gmosx|George Moschovitis +@adherzog|Adam Herzog|adam@adamherzog.com + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FCONTRIBUTORS&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/HISTORICAL_CHANGELOG.md b/HISTORICAL_CHANGELOG.md new file mode 100644 index 000000000..3e7688f33 --- /dev/null +++ b/HISTORICAL_CHANGELOG.md @@ -0,0 +1,655 @@ +netdata (1.10.0) - 2018-03-27 + + Please check full changelog at github. + https://github.com/netdata/netdata/releases + + +netdata (1.9.0) - 2017-12-17 + + Please check full changelog at github. + https://github.com/netdata/netdata/releases + + +netdata (1.8.0) - 2017-09-17 + + This is mainly a bugfix release. + Please check full changelog at github.
+ + +netdata (1.7.0) - 2017-07-16 + + * netdata is still spreading fast + + we are at 320.000 users and 132.000 servers + + Almost 100k new users, 52k new installations and 800k docker pulls + since the previous release, 4 and a half months ago. + + netdata user base grows at about 1000 new users and 600 new servers + per day. Thank you. You are awesome. + + * The next release (v1.8) will be focused on providing a global health + monitoring service, for all netdata users, for free. + + * netdata is now a (very fast) fully featured statsd server and the + only one with automatic visualization: push a statsd metric and hit + F5 on the netdata dashboard: your metric visualized. It also supports + synthetic charts, defined by you, so that you can correlate and + visualize your application the way you like it. + + * netdata got new installation options + It is now easier than ever to install netdata - we also distribute a + statically linked netdata x86_64 binary, including key dependencies + (like bash, curl, etc) that can run everywhere a Linux kernel runs + (CoreOS, CirrOS, etc). + + * metrics streaming and replication have been improved significantly. + All known issues have been solved and key enhancements have been added. + Headless collectors and proxies can now send metrics to backends when + data source = as collected. + + * backends have got quite a few enhancements, including host tags and + metrics filtering at the netdata side; + prometheus support has been re-written to utilize more prometheus + features and provide more flexibility and integration options. + + * netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch, + RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba, + squid logs (with web_log plugin). + + * netdata dashboard loading times have been improved significantly + (hit F5 a few times on a netdata dashboard - it is now amazingly fast), + to support dashboards with thousands of charts. + + * netdata alarms now support custom hooks, so you can run whatever you + like in parallel with netdata alarms. + + * As usual, this release brings dozens more improvements, enhancements + and compatibility fixes. + + +netdata (1.6.0) - 2017-03-20 + + * birthday release: 1 year netdata + + netdata was first published on March 30th, 2016. + It has been a crazy year since then: + + 225.000 unique netdata users + currently, at 1.000 new unique users per day + + 80.000 unique netdata installations + currently, at 500 new installations per day + + 610.000 docker pulls on docker hub + + 4.000.000 netdata sessions served + currently, at 15.000 sessions served per day + + 20.000 github stars + + Thank you! + You are awesome! + + * central netdata is here + + This is the first release that supports real-time streaming of + metrics between netdata servers. + + netdata can now be: + + - autonomous host monitoring + (like it always has been) + + - headless data collector + (collect and stream metrics in real-time to another netdata) + + - headless proxy + (collect metrics from multiple netdata and stream them to another netdata) + + - store and forward proxy + (like headless proxy, but with a local database) + + - central database + (metrics from multiple hosts are aggregated) + + metrics databases can be configured on all nodes and each node maintaining + a database may have a different retention policy and possibly run + (even different) alarms on them.
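To ground the roles listed above, a minimal sketch of pointing a headless collector at a central netdata (assumptions: the stream.conf mechanism this feature ships with, a parent reachable on port 19999, and an arbitrary UUID as the API key; hostname and UUID are illustrative):

```sh
# on the child (headless collector): stream all metrics to the parent
cat >> /etc/netdata/stream.conf <<'EOF'
[stream]
    enabled = yes
    destination = central.example.com:19999
    api key = 11111111-2222-3333-4444-555555555555
EOF

# on the parent (central database): accept children presenting that key
cat >> /etc/netdata/stream.conf <<'EOF'
[11111111-2222-3333-4444-555555555555]
    enabled = yes
EOF
```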
+ + * monitoring ephemeral nodes + + netdata now supports monitoring autoscaled ephemeral nodes + that are started and stopped on demand (their IP is not known). + + When the ephemeral nodes start streaming metrics to the central + netdata, the central netdata will register them at the "my-netdata" + menu on the dashboard. + + For more information check: + https://github.com/netdata/netdata/tree/master/streaming#monitoring-ephemeral-nodes + + * monitoring ephemeral containers and VM guests + + netdata now cleans up container, guest VM, network interfaces and mounted + disk metrics, automatically disabling their alarms too. + + For more information check: + https://github.com/netdata/netdata/tree/master/collectors/cgroups.plugin#monitoring-ephemeral-containers + + * apps.plugin ported for FreeBSD + + @vlvkobal has ported "apps.plugin" to FreeBSD. netdata can now provide + "Applications", "Users" and "User Groups" on FreeBSD. + + * web_log plugin + + @l2isbad has done a wonderful job creating a unified web log parsing plugin + for all kinds of web server logs. With it, netdata provides real-time + performance information and health monitoring alarms for web applications + and web sites! + + For more information check: + https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/web_log#web_log + + * backends + + netdata can now archive metrics to `JSON` backends + (both push, by @lfdominguez, and pull modes). + + * IPMI monitoring + + netdata now has an IPMI plugin (based on freeipmi) + for monitoring server hardware. + + The plugin creates (up to) 8 charts: + + 1. number of sensors by state + 2. number of events in SEL + 3. Temperatures CELSIUS + 4. Temperatures FAHRENHEIT + 5. Voltages + 6. Currents + 7. Power + 8. Fans + + It also supports alarms (including the number of sensors in critical state). + + For more information, check: + https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin + + * new plugins + + @l2isbad builds python data collection plugins for netdata at a wonderful + rate! He rocks! + + - **web_log** for monitoring in real-time all kinds of web server log files @l2isbad + - **freeipmi** for monitoring IPMI (server hardware) + - **nsd** (the [name server daemon](https://www.nlnetlabs.nl/projects/nsd/)) @383c57 + - **mongodb** @l2isbad + - **smartd_log** (monitoring disk S.M.A.R.T. values) @l2isbad + + * improved plugins + + - **nfacct** reworked and now collects connection tracker information using netlink.
+ - **ElasticSearch** re-worked @l2isbad + - **mysql** re-worked to allow faster development of custom mysql based plugins (MySQLService) @l2isbad + - **SNMP** + - **tomcat** @NMcCloud + - **ap** (monitoring hostapd access points) + - **php_fpm** @l2isbad + - **postgres** @l2isbad + - **isc_dhcpd** @l2isbad + - **bind_rndc** @l2isbad + - **numa** + - **apps.plugin** improvements and freebsd support @vlvkobal + - **fail2ban** @l2isbad + - **freeradius** @l2isbad + - **nut** (monitoring UPSes) + - **tc** (Linux QoS) now works on qdiscs instead of classes for the same result (a lot faster) @t-h-e + - **varnish** @l2isbad + + * new and improved alarms + - **web_log**, many alarms to detect common web site/API issues + - **fping**, alarms to detect packet loss, disconnects and unusually high latency + - **cpu**, cpu utilization alarm now ignores `nice` + + * new and improved alarm notification methods + - **HipChat** to allow hosted HipChat @frei-style + - **discordapp** @lowfive + + * dashboard improvements + - dashboard now works on HiDPi screens + - dashboard now shows version of netdata + - dashboard now resets charts properly + - dashboard updated to use latest gauge.js release + + * other improvements + - thanks to @rlefevre netdata now uses a lot of different high resolution system clocks. + + netdata has received a lot more improvements from many more contributors! + + Thank you all! + + +netdata (1.5.0) - 2017-01-22 + + * yet another release that makes netdata the fastest + netdata ever! + + * netdata runs on FreeBSD, FreeNAS and MacOS ! + + Vladimir Kobal (@vlvkobal) has done magnificent work + porting netdata to FreeBSD and MacOS. + + Everything works: cpu, memory, disks performance, disks space, + network interfaces, interrupts, IPv4 metrics, IPv6 metrics, + processes, context switches, softnet, IPC queues, + IPC semaphores, IPC shared memory, uptime, etc. Wow! + + * netdata supports data archiving to backend databases: + + - Graphite + - OpenTSDB + - Prometheus + + and of course all the compatible ones + (KairosDB, InfluxDB, Blueflood, etc) + + * new plugins: + + Ilya Mashchenko (@l2isbad) has created most of the python + data collection plugins in this release ! + + - systemd Services (using cgroups!) + - FPing (yes, network latency in netdata!) + - postgres databases @facetoe, @moumoul + - Varnish disk cache (v3 and v4) @l2isbad + - ElasticSearch @l2isbad + - HAproxy @l2isbad + - FreeRadius @l2isbad, @lgz + - mdstat (RAID) @l2isbad + - ISC bind (via rndc) @l2isbad + - ISC dhcpd @l2isbad, @lgz + - Fail2Ban @l2isbad + - OpenVPN status log @l2isbad, @lgz + - NUMA memory @tycho + - CPU Idle @tycho + - gunicorn log @deltaskelta + - ECC memory hardware errors + - IPC semaphores + - uptime plugin (with a nice badge too) + + * improved plugins: + + - netfilter conntrack + - mysql (replication) @l2isbad + - ipfs @pjz + - cpufreq @tycho + - hddtemp @l2isbad + - sensors @l2isbad + - nginx @leolovenet + - nginx_log @paulfantom + - phpfpm @leolovenet + - redis @leolovenet + - dovecot @justohall + - cgroups + - disk space + - apps.plugin + - /proc/interrupts @rlefevre + - /proc/softirqs @rlefevre + - /proc/vmstat (system memory charts) + - /proc/net/snmp6 (IPv6 charts) + - /proc/self/meminfo (system memory charts) + - /proc/net/dev (network interfaces) + - tc (linux QoS) + + * new/improved alarms: + + - MySQL / MariaDB alarms (incl.
replication) + - IPFS alarms + - HAproxy alarms + - UDP buffer alarms + - TCP AttemptFails + - ECC memory alarms + - netfilter connections alarms + - SNMP + + * new alarm notifications: + + - messagebird.com @tech-no-logical + - pagerduty.com @jimcooley + - pushbullet.com @tperalta82 + - twilio.com @shadycuz + - HipChat + - kafka + + * shell integration + + - shell scripts can now query netdata easily! + + * dashboard improvements: + - dashboard is now faster on firefox, safari, opera, edge + (edge is still the slowest) + - dashboard now has slightly bigger fonts + - SHIFT + mouse wheel to zoom charts, works on all browsers + - perfect-scrollbar on the dashboard + - dashboard 4K resolution fixes + - dashboard compatibility fixes for embedding charts in + third party web sites + - charts on custom dashboards can have common min/max + even if they come from different netdata servers + - alarm log is now saved and loaded back so that + the alarm history is available at the dashboard + + * other improvements: + - python.d.plugin has received way too many improvements + from many contributors! + - charts.d.plugin can now be forked to support + multiple independent instances + - registry has been re-factored to lower its memory + requirements (required for the public registry) + - simple patterns in cgroups, disks and alarms + - netdata-installer.sh can now correctly install + netdata in containers + - supplied logrotate script compatibility fixes + - spec cleanup @breed808 + - clocks and timers reworked @rlefevre + + netdata has received a lot more improvements from many more + contributors! + + Thank you all guys! + + +netdata (1.4.0) - 2016-10-04 + + At a glance: + + - the fastest netdata ever (with a better look too)! + - improved IoT and containers support! + - alarms improved in almost every way! + + - new plugins: + softnet netdev, + extended TCP metrics, + UDPLite, + NFS v2, v3 client (server was there already), + NFS v4 server & client, + APCUPSd, + RetroShare + + - improved plugins: + mysql, + cgroups, + hddtemp, + sensors, + phpfpm, + tc (QoS) + + In detail: + + * improved alarms + + Many new alarms have been added to detect common kernel + configuration errors and old alarms have been re-worked + to avoid notification floods. + + Alarms now support notification hysteresis (both static + and dynamic), notification self-cancellation, dynamic + thresholds based on current alarm status. + + * improved alarm notifications + + netdata now supports: + + - email notifications + - slack.com notifications on slack channels + - pushover.net notifications (mobile push notifications) + - telegram.org notifications + + For all the above methods, netdata supports role-based + notifications, with multiple recipients for each role + and severity filtering per recipient! + + Also, netdata supports HTML5 notifications, while the + dashboard is open in a browser window (no need to be + the active one). + + All notifications are now clickable to get to the chart + that raised the alarm. + + * improved IoT support! + + netdata builds and runs with musl libc and runs on systems + based on busybox. + + * improved containers support! + + netdata runs on alpine linux (a low profile linux distribution + used in containers). + + * Dozens of other improvements and bugfixes + + +netdata (1.3.0) - 2016-08-28 + + At a glance: + + - netdata has health monitoring / alarms! + - netdata has badges that can be embedded anywhere! + - netdata plugins are now written in Python!
+ - new plugins: redis, memcached, nginx_log, ipfs, apache_cache + + IMPORTANT: + Since netdata now uses Python plugins, new packages are + required to be installed on a system to allow it to work. + For more information, please check the installation page: + + https://github.com/netdata/netdata/tree/master/installer#installation + + In detail: + + * netdata has alarms! + + Based on the POLL we made on github + (https://github.com/netdata/netdata/issues/436), + health monitoring was the winner. So here it is! + + netdata now has a powerful health monitoring system embedded. + Please check the wiki page: + + https://github.com/netdata/netdata/tree/master/health + + * netdata has badges! + + netdata can generate badges with live information from the + collected metrics. + Please check the wiki page: + + https://github.com/netdata/netdata/tree/master/web/api/badges + + * netdata plugins are now written in Python! + + Thanks to the great work of Paweł Krupa (@paulfantom), most BASH + plugins have been ported to Python. + + The new python.d.plugin supports both python2 and python3 and + data collection from multiple sources for all modules. + + The following pre-existing modules have been ported to Python: + + - apache + - cpufreq + - example + - exim + - hddtemp + - mysql + - nginx + - phpfpm + - postfix + - sensors + - squid + - tomcat + + The following new modules have been added: + + - apache_cache + - dovecot + - ipfs + - memcached + - nginx_log + - redis + + * other data collectors: + + - Thanks to @simonnagl netdata now reports disk space usage. + + * dashboards now transfer certain settings from server to server + when changing servers via the my-netdata menu. + + The settings transferred are the dashboard theme, the online + help status and current pan and zoom timeframe of the dashboard. + + * API improvements: + + - reduction functions now support 'min', 'sum' and 'incremental-sum'. + + - netdata now offers a multi-threaded and a single threaded + web server (single threaded is better for IoT). + + * apps.plugin improvements: + + - can now run with command line argument 'without-files' + to prevent it from enumerating all the open files/sockets/pipes + of all running processes. + + - apps.plugin now scales the collected values to match + the total system usage. + + - apps.plugin can now report guest CPU usage per process. + + - repeating errors are now logged once per process. + + * netdata now runs with IDLE process priority (lower than nice 19) + + * netdata now instructs the kernel to kill it first when it starves + for memory. + + * netdata listens for signals: + + - SIGHUP to netdata instructs it to re-open its log files + (new logrotate files added too). + + - SIGUSR1 to netdata saves the database + + - SIGUSR2 to netdata reloads health / alarms configuration + + * netdata can now bind to multiple IPs and ports. + + * netdata now has a new systemd service file (it starts as user + netdata and does not fork). + + * Dozens of other improvements and bugfixes
+ + - A new improved search function in the internal indexes + made all searches faster by 50%, resulting in about + 20% better performance for the core of netdata. + + - More efficient threads locking in key components + contributed to the overall efficiency. + + * netdata now has a CENTRAL REGISTRY ! + + The central registry tracks all your netdata servers + and bookmarks them for you at the 'my-netdata' menu + on all dashboards. + + Every netdata can act as a registry, but there is also + a global registry provided for free for all netdata users! + + * netdata now monitors CONTAINERS ! + + docker, lxc, or anything else. For each container it monitors + CPU, RAM, DISK I/O (network interfaces were already monitored). + + * apps.plugin: now uses linux capabilities by default + without setuid to root + + * netdata now has an improved signal handler + thanks to @simonnagl + + * API: new improved CORS support + + * SNMP: counter64 support fixed + + * MYSQL: more charts, about QCache, MyISAM key cache, + InnoDB buffer pools, open files + + * DISK charts now show mount point when available + + * Dashboard: improved support for older web browsers + and mobile web browsers (thanks to @simonnagl) + + * Multi-server dashboards now allow de-coupled refreshes for + each chart, so that if one netdata has a network latency + the other charts are not affected + + * Several other minor improvements and bugfixes + + +netdata (1.1.0) - 2016-04-20 + + Dozens of commits that improve netdata in several ways: + + - Data collection: added IPv6 monitoring + - Data collection: added SYNPROXY DDoS protection monitoring + - Data collection: apps.plugin: added charts for users and user groups + - Data collection: apps.plugin: grouping of processes now supports patterns + - Data collection: apps.plugin: now it is faster, after the new features added + - Data collection: better auto-detection of partitions for disk monitoring + - Data collection: better fireqos integration for QoS monitoring + - Data collection: squid monitoring now uses squidclient + - Data collection: SNMP monitoring now supports 64bit counters + - API: fixed issues in CSV output generation + - API: netdata can now be restricted to listen on a specific IP + - Core and apps.plugin: error log flood protection + - Dashboard: better error handling when the netdata server is unreachable + - Dashboard: each chart now has a toolbox + - Dashboard: on-line help support + - Dashboard: check for netdata updates button + - Dashboard: added example /tv.html dashboard + - Packaging: now compiles with musl libc (alpine linux) + - Packaging: added debian packaging + - Packaging: support non-root installations + - Packaging: the installer generates uninstall script + +netdata (1.0.0) - 2016-03-22 + + - first public release + +netdata (1.0.0-rc.1) - 2015-11-28 + + - initial packaging diff --git a/Makefile.am b/Makefile.am index c90db5ca3..376ccf178 100644 --- a/Makefile.am +++ b/Makefile.am @@ -25,8 +25,6 @@ EXTRA_DIST = \ .lgtm.yml \ .travis \ .github/CODEOWNERS \ - build/build.sh \ - build/Dockerfile \ build/m4/jemalloc.m4 \ build/m4/ax_c___atomic.m4 \ build/m4/ax_check_enable_debug.m4 \ @@ -49,52 +47,40 @@ EXTRA_DIST = \ SUBDIRS = \ diagrams \ - makeself \ system \ - contrib \ tests \ $(NULL) dist_noinst_DATA= \ cppcheck.sh \ configs.signatures \ - docker \ + contrib \ netdata.cppcheck \ netdata.spec \ package.json \ - doc/Add-more-charts-to-netdata.md \ - doc/Demo-Sites.md \ - doc/Donations-netdata-has-received.md \ -
doc/Netdata-Security-and-Disclosure-Information.md \ - doc/Performance.md \ - doc/Running-behind-apache.md \ - doc/Running-behind-caddy.md \ - doc/Running-behind-lighttpd.md \ - doc/Running-behind-nginx.md \ - doc/Third-Party-Plugins.md \ - doc/a-github-star-is-important.md \ - doc/high-performance-netdata.md \ - doc/netdata-for-IoT.md \ - doc/netdata-security.md \ - doc/Why-Netdata.md \ - htmldoc/themes/material/partials/footer.html \ - installer/README.md \ - installer/UNINSTALL.md \ - installer/UPDATE.md \ - requirements.txt \ - runtime.txt \ + docs \ + packaging/version \ + packaging/go.d.checksums \ + packaging/installer/README.md \ + packaging/installer/UNINSTALL.md \ + packaging/installer/UPDATE.md \ + netlify.toml \ $(NULL) # until integrated within build # should be proper init.d/openrc/systemd usable dist_noinst_SCRIPTS= \ coverity-scan.sh \ - kickstart.sh \ - kickstart-static64.sh \ + packaging/installer/netdata-updater.sh \ + packaging/installer/kickstart.sh \ + packaging/installer/kickstart-static64.sh \ + packaging/installer/functions.sh \ netdata-installer.sh \ - installer/functions.sh \ - htmldoc/buildhtml.sh \ - htmldoc/buildyaml.sh \ + docs/generator/buildhtml.sh \ + docs/generator/buildyaml.sh \ + docs/generator/checklinks.sh \ + docs/generator/requirements.txt \ + docs/generator/runtime.txt \ $(NULL) # ----------------------------------------------------------------------------- @@ -120,13 +106,14 @@ AM_CFLAGS = \ $(OPTIONAL_UUID_CFLAGS) \ $(OPTIONAL_LIBCAP_LIBS) \ $(OPTIONAL_IPMIMONITORING_CFLAGS) \ + $(OPTIONAL_CUPS_CFLAGS) \ $(NULL) sbin_PROGRAMS = -dist_cache_DATA = installer/.keep -dist_varlib_DATA = installer/.keep -dist_registry_DATA = installer/.keep -dist_log_DATA = installer/.keep +dist_cache_DATA = packaging/installer/.keep +dist_varlib_DATA = packaging/installer/.keep +dist_registry_DATA = packaging/installer/.keep +dist_log_DATA = packaging/installer/.keep plugins_PROGRAMS = LIBNETDATA_FILES = \ @@ -227,6 +214,11 @@ FREEIPMI_PLUGIN_FILES = \ $(LIBNETDATA_FILES) \ $(NULL) +CUPS_PLUGIN_FILES = \ + collectors/cups.plugin/cups_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + NFACCT_PLUGIN_FILES = \ collectors/nfacct.plugin/plugin_nfacct.c \ collectors/nfacct.plugin/plugin_nfacct.h \ @@ -237,6 +229,7 @@ PROC_PLUGIN_FILES = \ collectors/proc.plugin/plugin_proc.c \ collectors/proc.plugin/plugin_proc.h \ collectors/proc.plugin/proc_diskstats.c \ + collectors/proc.plugin/proc_mdstat.c \ collectors/proc.plugin/proc_interrupts.c \ collectors/proc.plugin/proc_softirqs.c \ collectors/proc.plugin/proc_loadavg.c \ @@ -267,6 +260,7 @@ PROC_PLUGIN_FILES = \ collectors/proc.plugin/sys_devices_system_edac_mc.c \ collectors/proc.plugin/sys_devices_system_node.c \ collectors/proc.plugin/sys_fs_btrfs.c \ + collectors/proc.plugin/sys_class_power_supply.c \ $(NULL) TC_PLUGIN_FILES = \ @@ -351,6 +345,8 @@ API_PLUGIN_FILES = \ web/api/formatters/charts2json.h \ web/api/formatters/rrdset2json.c \ web/api/formatters/rrdset2json.h \ + web/api/health/health_cmdapi.c \ + web/api/health/health_cmdapi.h \ web/api/web_api_v1.c \ web/api/web_api_v1.h \ $(NULL) @@ -388,10 +384,6 @@ WEB_PLUGIN_FILES = \ web/server/web_server.h \ web/server/web_client_cache.c \ web/server/web_client_cache.h \ - web/server/single/single-threaded.c \ - web/server/single/single-threaded.h \ - web/server/multi/multi-threaded.c \ - web/server/multi/multi-threaded.h \ web/server/static/static-threaded.c \ web/server/static/static-threaded.h \ $(NULL) @@ -503,3 +495,12 @@ if ENABLE_PLUGIN_FREEIPMI 
$(OPTIONAL_IPMIMONITORING_LIBS) \ $(NULL) endif + +if ENABLE_PLUGIN_CUPS + plugins_PROGRAMS += cups.plugin + cups_plugin_SOURCES = $(CUPS_PLUGIN_FILES) + cups_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_CUPS_LIBS) \ + $(NULL) +endif diff --git a/Makefile.in b/Makefile.in deleted file mode 100644 index d0ad19a01..000000000 --- a/Makefile.in +++ /dev/null @@ -1,3036 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -sbin_PROGRAMS = netdata$(EXEEXT) -plugins_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3) -@FREEBSD_TRUE@am__append_1 = \ -@FREEBSD_TRUE@ $(FREEBSD_PLUGIN_FILES) \ -@FREEBSD_TRUE@ $(NULL) - -@MACOS_TRUE@am__append_2 = \ -@MACOS_TRUE@ $(MACOS_PLUGIN_FILES) \ -@MACOS_TRUE@ $(NULL) - -@LINUX_TRUE@am__append_3 = \ -@LINUX_TRUE@ $(CGROUPS_PLUGIN_FILES) \ -@LINUX_TRUE@ $(DISKSPACE_PLUGIN_FILES) \ -@LINUX_TRUE@ $(NFACCT_PLUGIN_FILES) \ -@LINUX_TRUE@ $(PROC_PLUGIN_FILES) \ -@LINUX_TRUE@ $(TC_PLUGIN_FILES) \ -@LINUX_TRUE@ $(NULL) - -@ENABLE_PLUGIN_APPS_TRUE@am__append_4 = 
apps.plugin -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__append_5 = cgroup-network -@ENABLE_PLUGIN_FREEIPMI_TRUE@am__append_6 = freeipmi.plugin -subdir = . -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(top_srcdir)/configure $(am__configure_deps) \ - $(srcdir)/config.h.in $(srcdir)/netdata.spec.in \ - $(dist_noinst_SCRIPTS) depcomp $(dist_cache_DATA) \ - $(dist_log_DATA) $(dist_noinst_DATA) $(dist_registry_DATA) \ - $(dist_varlib_DATA) compile config.guess config.sub install-sh \ - missing -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = config.h -CONFIG_CLEAN_FILES = netdata.spec -CONFIG_CLEAN_VPATH_FILES = -@ENABLE_PLUGIN_APPS_TRUE@am__EXEEXT_1 = apps.plugin$(EXEEXT) -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am__EXEEXT_2 = \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ cgroup-network$(EXEEXT) -@ENABLE_PLUGIN_FREEIPMI_TRUE@am__EXEEXT_3 = freeipmi.plugin$(EXEEXT) -am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" \ - "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" \ - "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)" -PROGRAMS = $(plugins_PROGRAMS) $(sbin_PROGRAMS) -am__apps_plugin_SOURCES_DIST = collectors/apps.plugin/apps_plugin.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ - libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ - libnetdata/avl/avl.c libnetdata/avl/avl.h \ - libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ - libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ - libnetdata/dictionary/dictionary.c \ - libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ - libnetdata/eval/eval.h libnetdata/inlined.h \ - libnetdata/libnetdata.c libnetdata/libnetdata.h \ - libnetdata/locks/locks.c libnetdata/locks/locks.h \ - libnetdata/log/log.c libnetdata/log/log.h \ - libnetdata/popen/popen.c libnetdata/popen/popen.h \ - libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ - libnetdata/os.c libnetdata/os.h \ - libnetdata/simple_pattern/simple_pattern.c \ - libnetdata/simple_pattern/simple_pattern.h \ - libnetdata/socket/socket.c libnetdata/socket/socket.h \ - libnetdata/statistical/statistical.c \ - libnetdata/statistical/statistical.h \ - libnetdata/storage_number/storage_number.c \ - libnetdata/storage_number/storage_number.h \ - libnetdata/threads/threads.c libnetdata/threads/threads.h \ - libnetdata/url/url.c libnetdata/url/url.h -am__dirstamp = $(am__leading_dot)dirstamp -am__objects_1 = libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT) \ - libnetdata/config/appconfig.$(OBJEXT) \ - libnetdata/avl/avl.$(OBJEXT) \ - libnetdata/buffer/buffer.$(OBJEXT) \ - libnetdata/clocks/clocks.$(OBJEXT) \ - libnetdata/dictionary/dictionary.$(OBJEXT) \ - libnetdata/eval/eval.$(OBJEXT) libnetdata/libnetdata.$(OBJEXT) \ - 
libnetdata/locks/locks.$(OBJEXT) libnetdata/log/log.$(OBJEXT) \ - libnetdata/popen/popen.$(OBJEXT) \ - libnetdata/procfile/procfile.$(OBJEXT) libnetdata/os.$(OBJEXT) \ - libnetdata/simple_pattern/simple_pattern.$(OBJEXT) \ - libnetdata/socket/socket.$(OBJEXT) \ - libnetdata/statistical/statistical.$(OBJEXT) \ - libnetdata/storage_number/storage_number.$(OBJEXT) \ - libnetdata/threads/threads.$(OBJEXT) \ - libnetdata/url/url.$(OBJEXT) -am__objects_2 = collectors/apps.plugin/apps_plugin.$(OBJEXT) \ - $(am__objects_1) -@ENABLE_PLUGIN_APPS_TRUE@am_apps_plugin_OBJECTS = $(am__objects_2) -apps_plugin_OBJECTS = $(am_apps_plugin_OBJECTS) -am__DEPENDENCIES_1 = -am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ - $(am__DEPENDENCIES_1) -@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_DEPENDENCIES = \ -@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_2) \ -@ENABLE_PLUGIN_APPS_TRUE@ $(am__DEPENDENCIES_1) -am__cgroup_network_SOURCES_DIST = \ - collectors/cgroups.plugin/cgroup-network.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ - libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ - libnetdata/avl/avl.c libnetdata/avl/avl.h \ - libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ - libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ - libnetdata/dictionary/dictionary.c \ - libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ - libnetdata/eval/eval.h libnetdata/inlined.h \ - libnetdata/libnetdata.c libnetdata/libnetdata.h \ - libnetdata/locks/locks.c libnetdata/locks/locks.h \ - libnetdata/log/log.c libnetdata/log/log.h \ - libnetdata/popen/popen.c libnetdata/popen/popen.h \ - libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ - libnetdata/os.c libnetdata/os.h \ - libnetdata/simple_pattern/simple_pattern.c \ - libnetdata/simple_pattern/simple_pattern.h \ - libnetdata/socket/socket.c libnetdata/socket/socket.h \ - libnetdata/statistical/statistical.c \ - libnetdata/statistical/statistical.h \ - libnetdata/storage_number/storage_number.c \ - libnetdata/storage_number/storage_number.h \ - libnetdata/threads/threads.c libnetdata/threads/threads.h \ - libnetdata/url/url.c libnetdata/url/url.h -am__objects_3 = collectors/cgroups.plugin/cgroup-network.$(OBJEXT) \ - $(am__objects_1) -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@am_cgroup_network_OBJECTS = \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__objects_3) -cgroup_network_OBJECTS = $(am_cgroup_network_OBJECTS) -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_DEPENDENCIES = \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(am__DEPENDENCIES_2) -am__freeipmi_plugin_SOURCES_DIST = \ - collectors/freeipmi.plugin/freeipmi_plugin.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ - libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ - libnetdata/avl/avl.c libnetdata/avl/avl.h \ - libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ - libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ - libnetdata/dictionary/dictionary.c \ - libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ - libnetdata/eval/eval.h libnetdata/inlined.h \ - libnetdata/libnetdata.c libnetdata/libnetdata.h \ - libnetdata/locks/locks.c libnetdata/locks/locks.h \ - libnetdata/log/log.c libnetdata/log/log.h \ - libnetdata/popen/popen.c libnetdata/popen/popen.h \ - libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ - libnetdata/os.c libnetdata/os.h \ - 
libnetdata/simple_pattern/simple_pattern.c \ - libnetdata/simple_pattern/simple_pattern.h \ - libnetdata/socket/socket.c libnetdata/socket/socket.h \ - libnetdata/statistical/statistical.c \ - libnetdata/statistical/statistical.h \ - libnetdata/storage_number/storage_number.c \ - libnetdata/storage_number/storage_number.h \ - libnetdata/threads/threads.c libnetdata/threads/threads.h \ - libnetdata/url/url.c libnetdata/url/url.h -am__objects_4 = collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT) \ - $(am__objects_1) -@ENABLE_PLUGIN_FREEIPMI_TRUE@am_freeipmi_plugin_OBJECTS = \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__objects_4) -freeipmi_plugin_OBJECTS = $(am_freeipmi_plugin_OBJECTS) -@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_DEPENDENCIES = \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_2) \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(am__DEPENDENCIES_1) -am__netdata_SOURCES_DIST = collectors/all.h daemon/common.c \ - daemon/common.h daemon/daemon.c daemon/daemon.h \ - daemon/global_statistics.c daemon/global_statistics.h \ - daemon/main.c daemon/main.h daemon/signals.c daemon/signals.h \ - daemon/unit_test.c daemon/unit_test.h \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ - libnetdata/config/appconfig.c libnetdata/config/appconfig.h \ - libnetdata/avl/avl.c libnetdata/avl/avl.h \ - libnetdata/buffer/buffer.c libnetdata/buffer/buffer.h \ - libnetdata/clocks/clocks.c libnetdata/clocks/clocks.h \ - libnetdata/dictionary/dictionary.c \ - libnetdata/dictionary/dictionary.h libnetdata/eval/eval.c \ - libnetdata/eval/eval.h libnetdata/inlined.h \ - libnetdata/libnetdata.c libnetdata/libnetdata.h \ - libnetdata/locks/locks.c libnetdata/locks/locks.h \ - libnetdata/log/log.c libnetdata/log/log.h \ - libnetdata/popen/popen.c libnetdata/popen/popen.h \ - libnetdata/procfile/procfile.c libnetdata/procfile/procfile.h \ - libnetdata/os.c libnetdata/os.h \ - libnetdata/simple_pattern/simple_pattern.c \ - libnetdata/simple_pattern/simple_pattern.h \ - libnetdata/socket/socket.c libnetdata/socket/socket.h \ - libnetdata/statistical/statistical.c \ - libnetdata/statistical/statistical.h \ - libnetdata/storage_number/storage_number.c \ - libnetdata/storage_number/storage_number.h \ - libnetdata/threads/threads.c libnetdata/threads/threads.h \ - libnetdata/url/url.c libnetdata/url/url.h \ - web/api/badges/web_buffer_svg.c \ - web/api/badges/web_buffer_svg.h web/api/exporters/allmetrics.c \ - web/api/exporters/allmetrics.h \ - web/api/exporters/shell/allmetrics_shell.c \ - web/api/exporters/shell/allmetrics_shell.h \ - web/api/queries/average/average.c \ - web/api/queries/average/average.h web/api/queries/des/des.c \ - web/api/queries/des/des.h \ - web/api/queries/incremental_sum/incremental_sum.c \ - web/api/queries/incremental_sum/incremental_sum.h \ - web/api/queries/max/max.c web/api/queries/max/max.h \ - web/api/queries/median/median.c \ - web/api/queries/median/median.h web/api/queries/min/min.c \ - web/api/queries/min/min.h web/api/queries/query.c \ - web/api/queries/query.h web/api/queries/rrdr.c \ - web/api/queries/rrdr.h web/api/queries/ses/ses.c \ - web/api/queries/ses/ses.h web/api/queries/stddev/stddev.c \ - web/api/queries/stddev/stddev.h web/api/queries/sum/sum.c \ - web/api/queries/sum/sum.h web/api/formatters/rrd2json.c \ - web/api/formatters/rrd2json.h web/api/formatters/csv/csv.c \ - web/api/formatters/csv/csv.h web/api/formatters/json/json.c \ - web/api/formatters/json/json.h web/api/formatters/ssv/ssv.c \ - 
web/api/formatters/ssv/ssv.h web/api/formatters/value/value.c \ - web/api/formatters/value/value.h \ - web/api/formatters/json_wrapper.c \ - web/api/formatters/json_wrapper.h \ - web/api/formatters/charts2json.c \ - web/api/formatters/charts2json.h \ - web/api/formatters/rrdset2json.c \ - web/api/formatters/rrdset2json.h web/api/web_api_v1.c \ - web/api/web_api_v1.h backends/backends.c backends/backends.h \ - backends/graphite/graphite.c backends/graphite/graphite.h \ - backends/json/json.c backends/json/json.h \ - backends/opentsdb/opentsdb.c backends/opentsdb/opentsdb.h \ - backends/prometheus/backend_prometheus.c \ - backends/prometheus/backend_prometheus.h \ - collectors/checks.plugin/plugin_checks.c \ - collectors/checks.plugin/plugin_checks.h health/health.c \ - health/health.h health/health_config.c health/health_json.c \ - health/health_log.c \ - collectors/idlejitter.plugin/plugin_idlejitter.c \ - collectors/idlejitter.plugin/plugin_idlejitter.h \ - collectors/plugins.d/plugins_d.c \ - collectors/plugins.d/plugins_d.h registry/registry.c \ - registry/registry.h registry/registry_db.c \ - registry/registry_init.c registry/registry_internals.c \ - registry/registry_internals.h registry/registry_log.c \ - registry/registry_machine.c registry/registry_machine.h \ - registry/registry_person.c registry/registry_person.h \ - registry/registry_url.c registry/registry_url.h \ - database/rrdcalc.c database/rrdcalc.h \ - database/rrdcalctemplate.c database/rrdcalctemplate.h \ - database/rrddim.c database/rrddimvar.c database/rrddimvar.h \ - database/rrdfamily.c database/rrdhost.c database/rrd.c \ - database/rrd.h database/rrdset.c database/rrdsetvar.c \ - database/rrdsetvar.h database/rrdvar.c database/rrdvar.h \ - streaming/rrdpush.c streaming/rrdpush.h \ - collectors/statsd.plugin/statsd.c \ - collectors/statsd.plugin/statsd.h web/server/web_client.c \ - web/server/web_client.h web/server/web_server.c \ - web/server/web_server.h web/server/web_client_cache.c \ - web/server/web_client_cache.h \ - web/server/single/single-threaded.c \ - web/server/single/single-threaded.h \ - web/server/multi/multi-threaded.c \ - web/server/multi/multi-threaded.h \ - web/server/static/static-threaded.c \ - web/server/static/static-threaded.h \ - collectors/freebsd.plugin/plugin_freebsd.c \ - collectors/freebsd.plugin/plugin_freebsd.h \ - collectors/freebsd.plugin/freebsd_sysctl.c \ - collectors/freebsd.plugin/freebsd_getmntinfo.c \ - collectors/freebsd.plugin/freebsd_getifaddrs.c \ - collectors/freebsd.plugin/freebsd_devstat.c \ - collectors/freebsd.plugin/freebsd_kstat_zfs.c \ - collectors/freebsd.plugin/freebsd_ipfw.c \ - collectors/proc.plugin/zfs_common.c \ - collectors/proc.plugin/zfs_common.h \ - collectors/macos.plugin/plugin_macos.c \ - collectors/macos.plugin/plugin_macos.h \ - collectors/macos.plugin/macos_sysctl.c \ - collectors/macos.plugin/macos_mach_smi.c \ - collectors/macos.plugin/macos_fw.c \ - collectors/cgroups.plugin/sys_fs_cgroup.c \ - collectors/cgroups.plugin/sys_fs_cgroup.h \ - collectors/diskspace.plugin/plugin_diskspace.h \ - collectors/diskspace.plugin/plugin_diskspace.c \ - collectors/nfacct.plugin/plugin_nfacct.c \ - collectors/nfacct.plugin/plugin_nfacct.h \ - collectors/proc.plugin/ipc.c \ - collectors/proc.plugin/plugin_proc.c \ - collectors/proc.plugin/plugin_proc.h \ - collectors/proc.plugin/proc_diskstats.c \ - collectors/proc.plugin/proc_interrupts.c \ - collectors/proc.plugin/proc_softirqs.c \ - collectors/proc.plugin/proc_loadavg.c \ - 
collectors/proc.plugin/proc_meminfo.c \ - collectors/proc.plugin/proc_net_dev.c \ - collectors/proc.plugin/proc_net_ip_vs_stats.c \ - collectors/proc.plugin/proc_net_netstat.c \ - collectors/proc.plugin/proc_net_rpc_nfs.c \ - collectors/proc.plugin/proc_net_rpc_nfsd.c \ - collectors/proc.plugin/proc_net_snmp.c \ - collectors/proc.plugin/proc_net_snmp6.c \ - collectors/proc.plugin/proc_net_sctp_snmp.c \ - collectors/proc.plugin/proc_net_sockstat.c \ - collectors/proc.plugin/proc_net_sockstat6.c \ - collectors/proc.plugin/proc_net_softnet_stat.c \ - collectors/proc.plugin/proc_net_stat_conntrack.c \ - collectors/proc.plugin/proc_net_stat_synproxy.c \ - collectors/proc.plugin/proc_self_mountinfo.c \ - collectors/proc.plugin/proc_self_mountinfo.h \ - collectors/proc.plugin/proc_spl_kstat_zfs.c \ - collectors/proc.plugin/proc_stat.c \ - collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ - collectors/proc.plugin/proc_vmstat.c \ - collectors/proc.plugin/proc_uptime.c \ - collectors/proc.plugin/sys_kernel_mm_ksm.c \ - collectors/proc.plugin/sys_devices_system_edac_mc.c \ - collectors/proc.plugin/sys_devices_system_node.c \ - collectors/proc.plugin/sys_fs_btrfs.c \ - collectors/tc.plugin/plugin_tc.c \ - collectors/tc.plugin/plugin_tc.h -am__objects_5 = daemon/common.$(OBJEXT) daemon/daemon.$(OBJEXT) \ - daemon/global_statistics.$(OBJEXT) daemon/main.$(OBJEXT) \ - daemon/signals.$(OBJEXT) daemon/unit_test.$(OBJEXT) -am__objects_6 = web/api/badges/web_buffer_svg.$(OBJEXT) \ - web/api/exporters/allmetrics.$(OBJEXT) \ - web/api/exporters/shell/allmetrics_shell.$(OBJEXT) \ - web/api/queries/average/average.$(OBJEXT) \ - web/api/queries/des/des.$(OBJEXT) \ - web/api/queries/incremental_sum/incremental_sum.$(OBJEXT) \ - web/api/queries/max/max.$(OBJEXT) \ - web/api/queries/median/median.$(OBJEXT) \ - web/api/queries/min/min.$(OBJEXT) \ - web/api/queries/query.$(OBJEXT) web/api/queries/rrdr.$(OBJEXT) \ - web/api/queries/ses/ses.$(OBJEXT) \ - web/api/queries/stddev/stddev.$(OBJEXT) \ - web/api/queries/sum/sum.$(OBJEXT) \ - web/api/formatters/rrd2json.$(OBJEXT) \ - web/api/formatters/csv/csv.$(OBJEXT) \ - web/api/formatters/json/json.$(OBJEXT) \ - web/api/formatters/ssv/ssv.$(OBJEXT) \ - web/api/formatters/value/value.$(OBJEXT) \ - web/api/formatters/json_wrapper.$(OBJEXT) \ - web/api/formatters/charts2json.$(OBJEXT) \ - web/api/formatters/rrdset2json.$(OBJEXT) \ - web/api/web_api_v1.$(OBJEXT) -am__objects_7 = backends/backends.$(OBJEXT) \ - backends/graphite/graphite.$(OBJEXT) \ - backends/json/json.$(OBJEXT) \ - backends/opentsdb/opentsdb.$(OBJEXT) \ - backends/prometheus/backend_prometheus.$(OBJEXT) -am__objects_8 = collectors/checks.plugin/plugin_checks.$(OBJEXT) -am__objects_9 = health/health.$(OBJEXT) health/health_config.$(OBJEXT) \ - health/health_json.$(OBJEXT) health/health_log.$(OBJEXT) -am__objects_10 = \ - collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT) -am__objects_11 = collectors/plugins.d/plugins_d.$(OBJEXT) -am__objects_12 = registry/registry.$(OBJEXT) \ - registry/registry_db.$(OBJEXT) \ - registry/registry_init.$(OBJEXT) \ - registry/registry_internals.$(OBJEXT) \ - registry/registry_log.$(OBJEXT) \ - registry/registry_machine.$(OBJEXT) \ - registry/registry_person.$(OBJEXT) \ - registry/registry_url.$(OBJEXT) -am__objects_13 = database/rrdcalc.$(OBJEXT) \ - database/rrdcalctemplate.$(OBJEXT) database/rrddim.$(OBJEXT) \ - database/rrddimvar.$(OBJEXT) database/rrdfamily.$(OBJEXT) \ - database/rrdhost.$(OBJEXT) database/rrd.$(OBJEXT) \ - database/rrdset.$(OBJEXT) 
database/rrdsetvar.$(OBJEXT) \ - database/rrdvar.$(OBJEXT) -am__objects_14 = streaming/rrdpush.$(OBJEXT) -am__objects_15 = collectors/statsd.plugin/statsd.$(OBJEXT) -am__objects_16 = web/server/web_client.$(OBJEXT) \ - web/server/web_server.$(OBJEXT) \ - web/server/web_client_cache.$(OBJEXT) \ - web/server/single/single-threaded.$(OBJEXT) \ - web/server/multi/multi-threaded.$(OBJEXT) \ - web/server/static/static-threaded.$(OBJEXT) -am__objects_17 = collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT) \ - collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT) \ - collectors/proc.plugin/zfs_common.$(OBJEXT) -@FREEBSD_TRUE@am__objects_18 = $(am__objects_17) -am__objects_19 = collectors/macos.plugin/plugin_macos.$(OBJEXT) \ - collectors/macos.plugin/macos_sysctl.$(OBJEXT) \ - collectors/macos.plugin/macos_mach_smi.$(OBJEXT) \ - collectors/macos.plugin/macos_fw.$(OBJEXT) -@MACOS_TRUE@am__objects_20 = $(am__objects_19) -am__objects_21 = collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT) -am__objects_22 = \ - collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT) -am__objects_23 = collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT) -am__objects_24 = collectors/proc.plugin/ipc.$(OBJEXT) \ - collectors/proc.plugin/plugin_proc.$(OBJEXT) \ - collectors/proc.plugin/proc_diskstats.$(OBJEXT) \ - collectors/proc.plugin/proc_interrupts.$(OBJEXT) \ - collectors/proc.plugin/proc_softirqs.$(OBJEXT) \ - collectors/proc.plugin/proc_loadavg.$(OBJEXT) \ - collectors/proc.plugin/proc_meminfo.$(OBJEXT) \ - collectors/proc.plugin/proc_net_dev.$(OBJEXT) \ - collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT) \ - collectors/proc.plugin/proc_net_netstat.$(OBJEXT) \ - collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT) \ - collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT) \ - collectors/proc.plugin/proc_net_snmp.$(OBJEXT) \ - collectors/proc.plugin/proc_net_snmp6.$(OBJEXT) \ - collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT) \ - collectors/proc.plugin/proc_net_sockstat.$(OBJEXT) \ - collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT) \ - collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT) \ - collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT) \ - collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT) \ - collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT) \ - collectors/proc.plugin/zfs_common.$(OBJEXT) \ - collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT) \ - collectors/proc.plugin/proc_stat.$(OBJEXT) \ - collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT) \ - collectors/proc.plugin/proc_vmstat.$(OBJEXT) \ - collectors/proc.plugin/proc_uptime.$(OBJEXT) \ - collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT) \ - collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT) \ - collectors/proc.plugin/sys_devices_system_node.$(OBJEXT) \ - collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT) -am__objects_25 = collectors/tc.plugin/plugin_tc.$(OBJEXT) -@LINUX_TRUE@am__objects_26 = $(am__objects_21) $(am__objects_22) \ -@LINUX_TRUE@ $(am__objects_23) $(am__objects_24) \ -@LINUX_TRUE@ $(am__objects_25) -am__objects_27 = $(am__objects_5) $(am__objects_1) $(am__objects_6) \ - $(am__objects_7) $(am__objects_8) $(am__objects_9) \ - $(am__objects_10) $(am__objects_11) $(am__objects_12) \ - $(am__objects_13) $(am__objects_14) 
$(am__objects_15) \ - $(am__objects_16) $(am__objects_18) $(am__objects_20) \ - $(am__objects_26) -am_netdata_OBJECTS = $(am__objects_27) -netdata_OBJECTS = $(am_netdata_OBJECTS) -netdata_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -DEFAULT_INCLUDES = -I.@am__isrc@ -depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles -am__mv = mv -f -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -AM_V_CC = $(am__v_CC_@AM_V@) -am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) -am__v_CC_0 = @echo " CC " $@; -am__v_CC_1 = -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -AM_V_CCLD = $(am__v_CCLD_@AM_V@) -am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) -am__v_CCLD_0 = @echo " CCLD " $@; -am__v_CCLD_1 = -SOURCES = $(apps_plugin_SOURCES) $(cgroup_network_SOURCES) \ - $(freeipmi_plugin_SOURCES) $(netdata_SOURCES) -DIST_SOURCES = $(am__apps_plugin_SOURCES_DIST) \ - $(am__cgroup_network_SOURCES_DIST) \ - $(am__freeipmi_plugin_SOURCES_DIST) \ - $(am__netdata_SOURCES_DIST) -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -DATA = $(dist_cache_DATA) $(dist_log_DATA) $(dist_noinst_DATA) \ - $(dist_registry_DATA) $(dist_varlib_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - cscope distdir dist dist-all distcheck -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ - $(LISP)config.h.in -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -CSCOPE = cscope -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -distdir = $(PACKAGE)-$(VERSION) -top_distdir = $(distdir) -am__remove_distdir = \ - if test -d "$(distdir)"; then \ - find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -rf "$(distdir)" \ - || { sleep 5 && rm -rf "$(distdir)"; }; \ - else :; fi -am__post_remove_distdir = $(am__remove_distdir) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -DIST_ARCHIVES = $(distdir).tar.gz -GZIP_ENV = --best -DIST_TARGETS = dist-gzip -distuninstallcheck_listfiles = find . -type f -print -am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ - | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' -distcleancheck_listfiles = find . 
-type f -print -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ 
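Every `name = @name@` assignment above is the seam between Autoconf and make: when `./configure` finishes, `config.status` rewrites each `@token@` placeholder with the value probed or chosen at configure time, turning the distributed `Makefile.in` into the build-tree `Makefile`. The `@ENABLE_PLUGIN_..._TRUE@` prefixes seen earlier in this file are the same mechanism applied to `AM_CONDITIONAL` results: the token is substituted with the empty string when the condition holds and with `#` when it does not, which comments the whole line out. A minimal sketch of the round trip, with illustrative values rather than netdata's actual defaults:

    # Makefile.in, as shipped in the release tarball
    prefix  = @prefix@
    sbindir = @sbindir@
    @ENABLE_PLUGIN_APPS_TRUE@plugins_PROGRAMS = apps.plugin$(EXEEXT)

    # Makefile, as written by ./configure --prefix=/opt/netdata
    # (assuming the apps plugin was enabled at configure time)
    prefix  = /opt/netdata
    sbindir = ${exec_prefix}/sbin
    plugins_PROGRAMS = apps.plugin$(EXEEXT)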
-localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = foreign subdir-objects 1.11 -ACLOCAL_AMFLAGS = -I build/m4 -MAINTAINERCLEANFILES = \ - config.log config.status \ - $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(srcdir)/config.h.in~ $(srcdir)/configure \ - $(srcdir)/install-sh $(srcdir)/ltmain.sh $(srcdir)/missing \ - $(srcdir)/compile $(srcdir)/depcomp $(srcdir)/aclocal.m4 \ - $(srcdir)/config.guess $(srcdir)/config.sub \ - $(srcdir)/m4/ltsugar.m4 $(srcdir)/m4/libtool.m4 \ - $(srcdir)/m4/ltversion.m4 $(srcdir)/m4/lt~obsolete.m4 \ - $(srcdir)/m4/ltoptions.m4 \ - $(srcdir)/pkcs11-helper.spec $(srcdir)/config-w32-vc.h - -EXTRA_DIST = \ - .gitignore \ - .codacy.yml \ - .codeclimate.yml \ - .csslintrc \ - .eslintignore \ - .eslintrc \ - .lgtm.yml \ - .travis \ - .github/CODEOWNERS \ - build/build.sh \ - build/Dockerfile \ - build/m4/jemalloc.m4 \ - build/m4/ax_c___atomic.m4 \ - build/m4/ax_check_enable_debug.m4 \ - build/m4/ax_c_mallinfo.m4 \ - build/m4/ax_gcc_func_attribute.m4 \ - build/m4/ax_check_compile_flag.m4 \ - build/m4/ax_c_statement_expressions.m4 \ - build/m4/ax_pthread.m4 \ - build/m4/ax_c_lto.m4 \ - build/m4/ax_c_mallopt.m4 \ - build/m4/tcmalloc.m4 \ - build/m4/ax_c__generic.m4 \ - README.md \ - CONTRIBUTORS.md \ - CODE_OF_CONDUCT.md \ - LICENSE \ - REDISTRIBUTED.md \ - CONTRIBUTING.md \ - $(NULL) - - -# ----------------------------------------------------------------------------- -# Compile netdata binaries -SUBDIRS = diagrams makeself system contrib tests $(NULL) backends \ - collectors daemon database health libnetdata registry \ - streaming web $(NULL) -dist_noinst_DATA = \ - cppcheck.sh \ - configs.signatures \ - docker \ - netdata.cppcheck \ - netdata.spec \ - package.json \ - doc/Add-more-charts-to-netdata.md \ - doc/Demo-Sites.md \ - doc/Donations-netdata-has-received.md \ - doc/Netdata-Security-and-Disclosure-Information.md \ - doc/Performance.md \ - doc/Running-behind-apache.md \ - doc/Running-behind-caddy.md \ - doc/Running-behind-lighttpd.md \ - doc/Running-behind-nginx.md \ - doc/Third-Party-Plugins.md \ - doc/a-github-star-is-important.md \ - doc/high-performance-netdata.md \ - doc/netdata-for-IoT.md \ - doc/netdata-security.md \ - doc/Why-Netdata.md \ - htmldoc/themes/material/partials/footer.html \ - installer/README.md \ - installer/UNINSTALL.md \ - installer/UPDATE.md \ - requirements.txt \ - runtime.txt \ - $(NULL) - - -# until integrated within build -# should be proper init.d/openrc/systemd usable -dist_noinst_SCRIPTS = \ - coverity-scan.sh \ - kickstart.sh \ - kickstart-static64.sh \ - netdata-installer.sh \ - installer/functions.sh \ - htmldoc/buildhtml.sh \ - htmldoc/buildyaml.sh \ - $(NULL) - -AM_CFLAGS = \ - $(OPTIONAL_MATH_CFLAGS) \ - $(OPTIONAL_NFACCT_CLFAGS) \ - $(OPTIONAL_ZLIB_CFLAGS) \ - $(OPTIONAL_UUID_CFLAGS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(OPTIONAL_IPMIMONITORING_CFLAGS) \ - $(NULL) - -dist_cache_DATA = installer/.keep -dist_varlib_DATA = 
installer/.keep -dist_registry_DATA = installer/.keep -dist_log_DATA = installer/.keep -LIBNETDATA_FILES = \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ - libnetdata/config/appconfig.c \ - libnetdata/config/appconfig.h \ - libnetdata/avl/avl.c \ - libnetdata/avl/avl.h \ - libnetdata/buffer/buffer.c \ - libnetdata/buffer/buffer.h \ - libnetdata/clocks/clocks.c \ - libnetdata/clocks/clocks.h \ - libnetdata/dictionary/dictionary.c \ - libnetdata/dictionary/dictionary.h \ - libnetdata/eval/eval.c \ - libnetdata/eval/eval.h \ - libnetdata/inlined.h \ - libnetdata/libnetdata.c \ - libnetdata/libnetdata.h \ - libnetdata/locks/locks.c \ - libnetdata/locks/locks.h \ - libnetdata/log/log.c \ - libnetdata/log/log.h \ - libnetdata/popen/popen.c \ - libnetdata/popen/popen.h \ - libnetdata/procfile/procfile.c \ - libnetdata/procfile/procfile.h \ - libnetdata/os.c \ - libnetdata/os.h \ - libnetdata/simple_pattern/simple_pattern.c \ - libnetdata/simple_pattern/simple_pattern.h \ - libnetdata/socket/socket.c \ - libnetdata/socket/socket.h \ - libnetdata/statistical/statistical.c \ - libnetdata/statistical/statistical.h \ - libnetdata/storage_number/storage_number.c \ - libnetdata/storage_number/storage_number.h \ - libnetdata/threads/threads.c \ - libnetdata/threads/threads.h \ - libnetdata/url/url.c \ - libnetdata/url/url.h \ - $(NULL) - -APPS_PLUGIN_FILES = \ - collectors/apps.plugin/apps_plugin.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -CHECKS_PLUGIN_FILES = \ - collectors/checks.plugin/plugin_checks.c \ - collectors/checks.plugin/plugin_checks.h \ - $(NULL) - -FREEBSD_PLUGIN_FILES = \ - collectors/freebsd.plugin/plugin_freebsd.c \ - collectors/freebsd.plugin/plugin_freebsd.h \ - collectors/freebsd.plugin/freebsd_sysctl.c \ - collectors/freebsd.plugin/freebsd_getmntinfo.c \ - collectors/freebsd.plugin/freebsd_getifaddrs.c \ - collectors/freebsd.plugin/freebsd_devstat.c \ - collectors/freebsd.plugin/freebsd_kstat_zfs.c \ - collectors/freebsd.plugin/freebsd_ipfw.c \ - collectors/proc.plugin/zfs_common.c \ - collectors/proc.plugin/zfs_common.h \ - $(NULL) - -HEALTH_PLUGIN_FILES = \ - health/health.c \ - health/health.h \ - health/health_config.c \ - health/health_json.c \ - health/health_log.c \ - $(NULL) - -IDLEJITTER_PLUGIN_FILES = \ - collectors/idlejitter.plugin/plugin_idlejitter.c \ - collectors/idlejitter.plugin/plugin_idlejitter.h \ - $(NULL) - -CGROUPS_PLUGIN_FILES = \ - collectors/cgroups.plugin/sys_fs_cgroup.c \ - collectors/cgroups.plugin/sys_fs_cgroup.h \ - $(NULL) - -CGROUP_NETWORK_FILES = \ - collectors/cgroups.plugin/cgroup-network.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -DISKSPACE_PLUGIN_FILES = \ - collectors/diskspace.plugin/plugin_diskspace.h \ - collectors/diskspace.plugin/plugin_diskspace.c \ - $(NULL) - -FREEIPMI_PLUGIN_FILES = \ - collectors/freeipmi.plugin/freeipmi_plugin.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -NFACCT_PLUGIN_FILES = \ - collectors/nfacct.plugin/plugin_nfacct.c \ - collectors/nfacct.plugin/plugin_nfacct.h \ - $(NULL) - -PROC_PLUGIN_FILES = \ - collectors/proc.plugin/ipc.c \ - collectors/proc.plugin/plugin_proc.c \ - collectors/proc.plugin/plugin_proc.h \ - collectors/proc.plugin/proc_diskstats.c \ - collectors/proc.plugin/proc_interrupts.c \ - collectors/proc.plugin/proc_softirqs.c \ - collectors/proc.plugin/proc_loadavg.c \ - collectors/proc.plugin/proc_meminfo.c \ - collectors/proc.plugin/proc_net_dev.c \ - collectors/proc.plugin/proc_net_ip_vs_stats.c \ - 
collectors/proc.plugin/proc_net_netstat.c \ - collectors/proc.plugin/proc_net_rpc_nfs.c \ - collectors/proc.plugin/proc_net_rpc_nfsd.c \ - collectors/proc.plugin/proc_net_snmp.c \ - collectors/proc.plugin/proc_net_snmp6.c \ - collectors/proc.plugin/proc_net_sctp_snmp.c \ - collectors/proc.plugin/proc_net_sockstat.c \ - collectors/proc.plugin/proc_net_sockstat6.c \ - collectors/proc.plugin/proc_net_softnet_stat.c \ - collectors/proc.plugin/proc_net_stat_conntrack.c \ - collectors/proc.plugin/proc_net_stat_synproxy.c \ - collectors/proc.plugin/proc_self_mountinfo.c \ - collectors/proc.plugin/proc_self_mountinfo.h \ - collectors/proc.plugin/zfs_common.c \ - collectors/proc.plugin/zfs_common.h \ - collectors/proc.plugin/proc_spl_kstat_zfs.c \ - collectors/proc.plugin/proc_stat.c \ - collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ - collectors/proc.plugin/proc_vmstat.c \ - collectors/proc.plugin/proc_uptime.c \ - collectors/proc.plugin/sys_kernel_mm_ksm.c \ - collectors/proc.plugin/sys_devices_system_edac_mc.c \ - collectors/proc.plugin/sys_devices_system_node.c \ - collectors/proc.plugin/sys_fs_btrfs.c \ - $(NULL) - -TC_PLUGIN_FILES = \ - collectors/tc.plugin/plugin_tc.c \ - collectors/tc.plugin/plugin_tc.h \ - $(NULL) - -MACOS_PLUGIN_FILES = \ - collectors/macos.plugin/plugin_macos.c \ - collectors/macos.plugin/plugin_macos.h \ - collectors/macos.plugin/macos_sysctl.c \ - collectors/macos.plugin/macos_mach_smi.c \ - collectors/macos.plugin/macos_fw.c \ - $(NULL) - -PLUGINSD_PLUGIN_FILES = \ - collectors/plugins.d/plugins_d.c \ - collectors/plugins.d/plugins_d.h \ - $(NULL) - -RRD_PLUGIN_FILES = \ - database/rrdcalc.c \ - database/rrdcalc.h \ - database/rrdcalctemplate.c \ - database/rrdcalctemplate.h \ - database/rrddim.c \ - database/rrddimvar.c \ - database/rrddimvar.h \ - database/rrdfamily.c \ - database/rrdhost.c \ - database/rrd.c \ - database/rrd.h \ - database/rrdset.c \ - database/rrdsetvar.c \ - database/rrdsetvar.h \ - database/rrdvar.c \ - database/rrdvar.h \ - $(NULL) - -API_PLUGIN_FILES = \ - web/api/badges/web_buffer_svg.c \ - web/api/badges/web_buffer_svg.h \ - web/api/exporters/allmetrics.c \ - web/api/exporters/allmetrics.h \ - web/api/exporters/shell/allmetrics_shell.c \ - web/api/exporters/shell/allmetrics_shell.h \ - web/api/queries/average/average.c \ - web/api/queries/average/average.h \ - web/api/queries/des/des.c \ - web/api/queries/des/des.h \ - web/api/queries/incremental_sum/incremental_sum.c \ - web/api/queries/incremental_sum/incremental_sum.h \ - web/api/queries/max/max.c \ - web/api/queries/max/max.h \ - web/api/queries/median/median.c \ - web/api/queries/median/median.h \ - web/api/queries/min/min.c \ - web/api/queries/min/min.h \ - web/api/queries/query.c \ - web/api/queries/query.h \ - web/api/queries/rrdr.c \ - web/api/queries/rrdr.h \ - web/api/queries/ses/ses.c \ - web/api/queries/ses/ses.h \ - web/api/queries/stddev/stddev.c \ - web/api/queries/stddev/stddev.h \ - web/api/queries/sum/sum.c \ - web/api/queries/sum/sum.h \ - web/api/formatters/rrd2json.c \ - web/api/formatters/rrd2json.h \ - web/api/formatters/csv/csv.c \ - web/api/formatters/csv/csv.h \ - web/api/formatters/json/json.c \ - web/api/formatters/json/json.h \ - web/api/formatters/ssv/ssv.c \ - web/api/formatters/ssv/ssv.h \ - web/api/formatters/value/value.c \ - web/api/formatters/value/value.h \ - web/api/formatters/json_wrapper.c \ - web/api/formatters/json_wrapper.h \ - web/api/formatters/charts2json.c \ - web/api/formatters/charts2json.h \ - 
web/api/formatters/rrdset2json.c \ - web/api/formatters/rrdset2json.h \ - web/api/web_api_v1.c \ - web/api/web_api_v1.h \ - $(NULL) - -STREAMING_PLUGIN_FILES = \ - streaming/rrdpush.c \ - streaming/rrdpush.h \ - $(NULL) - -REGISTRY_PLUGIN_FILES = \ - registry/registry.c \ - registry/registry.h \ - registry/registry_db.c \ - registry/registry_init.c \ - registry/registry_internals.c \ - registry/registry_internals.h \ - registry/registry_log.c \ - registry/registry_machine.c \ - registry/registry_machine.h \ - registry/registry_person.c \ - registry/registry_person.h \ - registry/registry_url.c \ - registry/registry_url.h \ - $(NULL) - -STATSD_PLUGIN_FILES = \ - collectors/statsd.plugin/statsd.c \ - collectors/statsd.plugin/statsd.h \ - $(NULL) - -WEB_PLUGIN_FILES = \ - web/server/web_client.c \ - web/server/web_client.h \ - web/server/web_server.c \ - web/server/web_server.h \ - web/server/web_client_cache.c \ - web/server/web_client_cache.h \ - web/server/single/single-threaded.c \ - web/server/single/single-threaded.h \ - web/server/multi/multi-threaded.c \ - web/server/multi/multi-threaded.h \ - web/server/static/static-threaded.c \ - web/server/static/static-threaded.h \ - $(NULL) - -BACKENDS_PLUGIN_FILES = \ - backends/backends.c \ - backends/backends.h \ - backends/graphite/graphite.c \ - backends/graphite/graphite.h \ - backends/json/json.c \ - backends/json/json.h \ - backends/opentsdb/opentsdb.c \ - backends/opentsdb/opentsdb.h \ - backends/prometheus/backend_prometheus.c \ - backends/prometheus/backend_prometheus.h \ - $(NULL) - -DAEMON_FILES = \ - daemon/common.c \ - daemon/common.h \ - daemon/daemon.c \ - daemon/daemon.h \ - daemon/global_statistics.c \ - daemon/global_statistics.h \ - daemon/main.c \ - daemon/main.h \ - daemon/signals.c \ - daemon/signals.h \ - daemon/unit_test.c \ - daemon/unit_test.h \ - $(NULL) - -NETDATA_FILES = collectors/all.h $(DAEMON_FILES) $(LIBNETDATA_FILES) \ - $(API_PLUGIN_FILES) $(BACKENDS_PLUGIN_FILES) \ - $(CHECKS_PLUGIN_FILES) $(HEALTH_PLUGIN_FILES) \ - $(IDLEJITTER_PLUGIN_FILES) $(PLUGINSD_PLUGIN_FILES) \ - $(REGISTRY_PLUGIN_FILES) $(RRD_PLUGIN_FILES) \ - $(STREAMING_PLUGIN_FILES) $(STATSD_PLUGIN_FILES) \ - $(WEB_PLUGIN_FILES) $(NULL) $(am__append_1) $(am__append_2) \ - $(am__append_3) -NETDATA_COMMON_LIBS = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_ZLIB_LIBS) \ - $(OPTIONAL_UUID_LIBS) \ - $(NULL) - -netdata_SOURCES = $(NETDATA_FILES) -netdata_LDADD = \ - $(NETDATA_COMMON_LIBS) \ - $(OPTIONAL_NFACCT_LIBS) \ - $(NULL) - -@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_SOURCES = $(APPS_PLUGIN_FILES) -@ENABLE_PLUGIN_APPS_TRUE@apps_plugin_LDADD = \ -@ENABLE_PLUGIN_APPS_TRUE@ $(NETDATA_COMMON_LIBS) \ -@ENABLE_PLUGIN_APPS_TRUE@ $(OPTIONAL_LIBCAP_LIBS) \ -@ENABLE_PLUGIN_APPS_TRUE@ $(NULL) - -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES) -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@cgroup_network_LDADD = \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NETDATA_COMMON_LIBS) \ -@ENABLE_PLUGIN_CGROUP_NETWORK_TRUE@ $(NULL) - -@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_SOURCES = $(FREEIPMI_PLUGIN_FILES) -@ENABLE_PLUGIN_FREEIPMI_TRUE@freeipmi_plugin_LDADD = \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NETDATA_COMMON_LIBS) \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(OPTIONAL_IPMIMONITORING_LIBS) \ -@ENABLE_PLUGIN_FREEIPMI_TRUE@ $(NULL) - -all: config.h - $(MAKE) $(AM_MAKEFLAGS) all-recursive - -.SUFFIXES: -.SUFFIXES: .c .o .obj -am--refresh: Makefile - @: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do 
\ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ - $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --foreign Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - $(am__cd) $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) -$(am__aclocal_m4_deps): - -config.h: stamp-h1 - @test -f $@ || rm -f stamp-h1 - @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -netdata.spec: $(top_builddir)/config.status $(srcdir)/netdata.spec.in - cd $(top_builddir) && $(SHELL) ./config.status $@ -install-pluginsPROGRAMS: $(plugins_PROGRAMS) - @$(NORMAL_INSTALL) - @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do echo "$$p $$p"; done | \ - sed 's/$(EXEEXT)$$//' | \ - while read p p1; do if test -f $$p \ - ; then echo "$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n;h' \ - -e 's|.*|.|' \ - -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ - sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) files[d] = files[d] " " $$1; \ - else { print "f", $$3 "/" $$4, $$1; } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-pluginsPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(plugins_PROGRAMS)'; test -n "$(pluginsdir)" || list=; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ - -e 's/$$/$(EXEEXT)/' \ - `; \ - test -n "$$list" || exit 0; \ - echo " ( cd '$(DESTDIR)$(pluginsdir)' && rm -f" $$files ")"; \ - cd "$(DESTDIR)$(pluginsdir)" && rm -f $$files - -clean-pluginsPROGRAMS: - -test -z "$(plugins_PROGRAMS)" || rm -f $(plugins_PROGRAMS) -install-sbinPROGRAMS: $(sbin_PROGRAMS) - @$(NORMAL_INSTALL) - @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \ - fi; \ - for p 
in $$list; do echo "$$p $$p"; done | \ - sed 's/$(EXEEXT)$$//' | \ - while read p p1; do if test -f $$p \ - ; then echo "$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n;h' \ - -e 's|.*|.|' \ - -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ - sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) files[d] = files[d] " " $$1; \ - else { print "f", $$3 "/" $$4, $$1; } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ - $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-sbinPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ - -e 's/$$/$(EXEEXT)/' \ - `; \ - test -n "$$list" || exit 0; \ - echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \ - cd "$(DESTDIR)$(sbindir)" && rm -f $$files - -clean-sbinPROGRAMS: - -test -z "$(sbin_PROGRAMS)" || rm -f $(sbin_PROGRAMS) -collectors/apps.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/apps.plugin - @: > collectors/apps.plugin/$(am__dirstamp) -collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/apps.plugin/$(DEPDIR) - @: > collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/apps.plugin/apps_plugin.$(OBJEXT): \ - collectors/apps.plugin/$(am__dirstamp) \ - collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp) -libnetdata/adaptive_resortable_list/$(am__dirstamp): - @$(MKDIR_P) libnetdata/adaptive_resortable_list - @: > libnetdata/adaptive_resortable_list/$(am__dirstamp) -libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/adaptive_resortable_list/$(DEPDIR) - @: > libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) -libnetdata/adaptive_resortable_list/adaptive_resortable_list.$(OBJEXT): \ - libnetdata/adaptive_resortable_list/$(am__dirstamp) \ - libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) -libnetdata/config/$(am__dirstamp): - @$(MKDIR_P) libnetdata/config - @: > libnetdata/config/$(am__dirstamp) -libnetdata/config/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/config/$(DEPDIR) - @: > libnetdata/config/$(DEPDIR)/$(am__dirstamp) -libnetdata/config/appconfig.$(OBJEXT): \ - libnetdata/config/$(am__dirstamp) \ - libnetdata/config/$(DEPDIR)/$(am__dirstamp) -libnetdata/avl/$(am__dirstamp): - @$(MKDIR_P) libnetdata/avl - @: > libnetdata/avl/$(am__dirstamp) -libnetdata/avl/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/avl/$(DEPDIR) - @: > libnetdata/avl/$(DEPDIR)/$(am__dirstamp) -libnetdata/avl/avl.$(OBJEXT): libnetdata/avl/$(am__dirstamp) \ - libnetdata/avl/$(DEPDIR)/$(am__dirstamp) -libnetdata/buffer/$(am__dirstamp): - @$(MKDIR_P) libnetdata/buffer - @: > libnetdata/buffer/$(am__dirstamp) -libnetdata/buffer/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/buffer/$(DEPDIR) - @: > libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) -libnetdata/buffer/buffer.$(OBJEXT): libnetdata/buffer/$(am__dirstamp) \ - libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) -libnetdata/clocks/$(am__dirstamp): - @$(MKDIR_P) libnetdata/clocks - @: > libnetdata/clocks/$(am__dirstamp) -libnetdata/clocks/$(DEPDIR)/$(am__dirstamp): - 
@$(MKDIR_P) libnetdata/clocks/$(DEPDIR) - @: > libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) -libnetdata/clocks/clocks.$(OBJEXT): libnetdata/clocks/$(am__dirstamp) \ - libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) -libnetdata/dictionary/$(am__dirstamp): - @$(MKDIR_P) libnetdata/dictionary - @: > libnetdata/dictionary/$(am__dirstamp) -libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/dictionary/$(DEPDIR) - @: > libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) -libnetdata/dictionary/dictionary.$(OBJEXT): \ - libnetdata/dictionary/$(am__dirstamp) \ - libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) -libnetdata/eval/$(am__dirstamp): - @$(MKDIR_P) libnetdata/eval - @: > libnetdata/eval/$(am__dirstamp) -libnetdata/eval/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/eval/$(DEPDIR) - @: > libnetdata/eval/$(DEPDIR)/$(am__dirstamp) -libnetdata/eval/eval.$(OBJEXT): libnetdata/eval/$(am__dirstamp) \ - libnetdata/eval/$(DEPDIR)/$(am__dirstamp) -libnetdata/$(am__dirstamp): - @$(MKDIR_P) libnetdata - @: > libnetdata/$(am__dirstamp) -libnetdata/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/$(DEPDIR) - @: > libnetdata/$(DEPDIR)/$(am__dirstamp) -libnetdata/libnetdata.$(OBJEXT): libnetdata/$(am__dirstamp) \ - libnetdata/$(DEPDIR)/$(am__dirstamp) -libnetdata/locks/$(am__dirstamp): - @$(MKDIR_P) libnetdata/locks - @: > libnetdata/locks/$(am__dirstamp) -libnetdata/locks/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/locks/$(DEPDIR) - @: > libnetdata/locks/$(DEPDIR)/$(am__dirstamp) -libnetdata/locks/locks.$(OBJEXT): libnetdata/locks/$(am__dirstamp) \ - libnetdata/locks/$(DEPDIR)/$(am__dirstamp) -libnetdata/log/$(am__dirstamp): - @$(MKDIR_P) libnetdata/log - @: > libnetdata/log/$(am__dirstamp) -libnetdata/log/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/log/$(DEPDIR) - @: > libnetdata/log/$(DEPDIR)/$(am__dirstamp) -libnetdata/log/log.$(OBJEXT): libnetdata/log/$(am__dirstamp) \ - libnetdata/log/$(DEPDIR)/$(am__dirstamp) -libnetdata/popen/$(am__dirstamp): - @$(MKDIR_P) libnetdata/popen - @: > libnetdata/popen/$(am__dirstamp) -libnetdata/popen/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/popen/$(DEPDIR) - @: > libnetdata/popen/$(DEPDIR)/$(am__dirstamp) -libnetdata/popen/popen.$(OBJEXT): libnetdata/popen/$(am__dirstamp) \ - libnetdata/popen/$(DEPDIR)/$(am__dirstamp) -libnetdata/procfile/$(am__dirstamp): - @$(MKDIR_P) libnetdata/procfile - @: > libnetdata/procfile/$(am__dirstamp) -libnetdata/procfile/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/procfile/$(DEPDIR) - @: > libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) -libnetdata/procfile/procfile.$(OBJEXT): \ - libnetdata/procfile/$(am__dirstamp) \ - libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) -libnetdata/os.$(OBJEXT): libnetdata/$(am__dirstamp) \ - libnetdata/$(DEPDIR)/$(am__dirstamp) -libnetdata/simple_pattern/$(am__dirstamp): - @$(MKDIR_P) libnetdata/simple_pattern - @: > libnetdata/simple_pattern/$(am__dirstamp) -libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/simple_pattern/$(DEPDIR) - @: > libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) -libnetdata/simple_pattern/simple_pattern.$(OBJEXT): \ - libnetdata/simple_pattern/$(am__dirstamp) \ - libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) -libnetdata/socket/$(am__dirstamp): - @$(MKDIR_P) libnetdata/socket - @: > libnetdata/socket/$(am__dirstamp) -libnetdata/socket/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/socket/$(DEPDIR) - @: > libnetdata/socket/$(DEPDIR)/$(am__dirstamp) -libnetdata/socket/socket.$(OBJEXT): 
libnetdata/socket/$(am__dirstamp) \ - libnetdata/socket/$(DEPDIR)/$(am__dirstamp) -libnetdata/statistical/$(am__dirstamp): - @$(MKDIR_P) libnetdata/statistical - @: > libnetdata/statistical/$(am__dirstamp) -libnetdata/statistical/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/statistical/$(DEPDIR) - @: > libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) -libnetdata/statistical/statistical.$(OBJEXT): \ - libnetdata/statistical/$(am__dirstamp) \ - libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) -libnetdata/storage_number/$(am__dirstamp): - @$(MKDIR_P) libnetdata/storage_number - @: > libnetdata/storage_number/$(am__dirstamp) -libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/storage_number/$(DEPDIR) - @: > libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) -libnetdata/storage_number/storage_number.$(OBJEXT): \ - libnetdata/storage_number/$(am__dirstamp) \ - libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) -libnetdata/threads/$(am__dirstamp): - @$(MKDIR_P) libnetdata/threads - @: > libnetdata/threads/$(am__dirstamp) -libnetdata/threads/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/threads/$(DEPDIR) - @: > libnetdata/threads/$(DEPDIR)/$(am__dirstamp) -libnetdata/threads/threads.$(OBJEXT): \ - libnetdata/threads/$(am__dirstamp) \ - libnetdata/threads/$(DEPDIR)/$(am__dirstamp) -libnetdata/url/$(am__dirstamp): - @$(MKDIR_P) libnetdata/url - @: > libnetdata/url/$(am__dirstamp) -libnetdata/url/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) libnetdata/url/$(DEPDIR) - @: > libnetdata/url/$(DEPDIR)/$(am__dirstamp) -libnetdata/url/url.$(OBJEXT): libnetdata/url/$(am__dirstamp) \ - libnetdata/url/$(DEPDIR)/$(am__dirstamp) - -apps.plugin$(EXEEXT): $(apps_plugin_OBJECTS) $(apps_plugin_DEPENDENCIES) $(EXTRA_apps_plugin_DEPENDENCIES) - @rm -f apps.plugin$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(apps_plugin_OBJECTS) $(apps_plugin_LDADD) $(LIBS) -collectors/cgroups.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/cgroups.plugin - @: > collectors/cgroups.plugin/$(am__dirstamp) -collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/cgroups.plugin/$(DEPDIR) - @: > collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/cgroups.plugin/cgroup-network.$(OBJEXT): \ - collectors/cgroups.plugin/$(am__dirstamp) \ - collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) - -cgroup-network$(EXEEXT): $(cgroup_network_OBJECTS) $(cgroup_network_DEPENDENCIES) $(EXTRA_cgroup_network_DEPENDENCIES) - @rm -f cgroup-network$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(cgroup_network_OBJECTS) $(cgroup_network_LDADD) $(LIBS) -collectors/freeipmi.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/freeipmi.plugin - @: > collectors/freeipmi.plugin/$(am__dirstamp) -collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/freeipmi.plugin/$(DEPDIR) - @: > collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freeipmi.plugin/freeipmi_plugin.$(OBJEXT): \ - collectors/freeipmi.plugin/$(am__dirstamp) \ - collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp) - -freeipmi.plugin$(EXEEXT): $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_DEPENDENCIES) $(EXTRA_freeipmi_plugin_DEPENDENCIES) - @rm -f freeipmi.plugin$(EXEEXT) - $(AM_V_CCLD)$(LINK) $(freeipmi_plugin_OBJECTS) $(freeipmi_plugin_LDADD) $(LIBS) -daemon/$(am__dirstamp): - @$(MKDIR_P) daemon - @: > daemon/$(am__dirstamp) -daemon/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) daemon/$(DEPDIR) - @: > daemon/$(DEPDIR)/$(am__dirstamp) -daemon/common.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) 
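The `$(am__dirstamp)` rules repeated above are how `subdir-objects` guarantees that an object's output directory exists before the compiler runs. Each object depends on a hidden `.dirstamp` witness file rather than on the directory itself, because a directory's mtime changes whenever any file inside it is added or removed, which would force spurious rebuilds. A hand-rolled sketch of the same idiom (paths hypothetical):

    # create the directory once, then record the fact in an empty witness file;
    # "@: > file" silently runs the no-op shell builtin with stdout redirected,
    # i.e. it creates/truncates the file without echoing a command
    daemon/.dirstamp:
    	@$(MKDIR_P) daemon
    	@: > daemon/.dirstamp

    # objects depend on the witness, not on the directory
    daemon/main.o: daemon/main.c daemon/.dirstamp
    	$(CC) $(CFLAGS) -c -o $@ daemon/main.c

The `$(AM_V_CCLD)$(LINK)` prefix on the `apps.plugin`, `cgroup-network` and `freeipmi.plugin` link rules just above is Automake's silent-rules machinery: by default it prints a terse `  CCLD  apps.plugin` line, and `make V=1` restores the full command echo.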
-daemon/daemon.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) -daemon/global_statistics.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) -daemon/main.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) -daemon/signals.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) -daemon/unit_test.$(OBJEXT): daemon/$(am__dirstamp) \ - daemon/$(DEPDIR)/$(am__dirstamp) -web/api/badges/$(am__dirstamp): - @$(MKDIR_P) web/api/badges - @: > web/api/badges/$(am__dirstamp) -web/api/badges/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/badges/$(DEPDIR) - @: > web/api/badges/$(DEPDIR)/$(am__dirstamp) -web/api/badges/web_buffer_svg.$(OBJEXT): \ - web/api/badges/$(am__dirstamp) \ - web/api/badges/$(DEPDIR)/$(am__dirstamp) -web/api/exporters/$(am__dirstamp): - @$(MKDIR_P) web/api/exporters - @: > web/api/exporters/$(am__dirstamp) -web/api/exporters/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/exporters/$(DEPDIR) - @: > web/api/exporters/$(DEPDIR)/$(am__dirstamp) -web/api/exporters/allmetrics.$(OBJEXT): \ - web/api/exporters/$(am__dirstamp) \ - web/api/exporters/$(DEPDIR)/$(am__dirstamp) -web/api/exporters/shell/$(am__dirstamp): - @$(MKDIR_P) web/api/exporters/shell - @: > web/api/exporters/shell/$(am__dirstamp) -web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/exporters/shell/$(DEPDIR) - @: > web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) -web/api/exporters/shell/allmetrics_shell.$(OBJEXT): \ - web/api/exporters/shell/$(am__dirstamp) \ - web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) -web/api/queries/average/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/average - @: > web/api/queries/average/$(am__dirstamp) -web/api/queries/average/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/average/$(DEPDIR) - @: > web/api/queries/average/$(DEPDIR)/$(am__dirstamp) -web/api/queries/average/average.$(OBJEXT): \ - web/api/queries/average/$(am__dirstamp) \ - web/api/queries/average/$(DEPDIR)/$(am__dirstamp) -web/api/queries/des/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/des - @: > web/api/queries/des/$(am__dirstamp) -web/api/queries/des/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/des/$(DEPDIR) - @: > web/api/queries/des/$(DEPDIR)/$(am__dirstamp) -web/api/queries/des/des.$(OBJEXT): \ - web/api/queries/des/$(am__dirstamp) \ - web/api/queries/des/$(DEPDIR)/$(am__dirstamp) -web/api/queries/incremental_sum/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/incremental_sum - @: > web/api/queries/incremental_sum/$(am__dirstamp) -web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/incremental_sum/$(DEPDIR) - @: > web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) -web/api/queries/incremental_sum/incremental_sum.$(OBJEXT): \ - web/api/queries/incremental_sum/$(am__dirstamp) \ - web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) -web/api/queries/max/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/max - @: > web/api/queries/max/$(am__dirstamp) -web/api/queries/max/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/max/$(DEPDIR) - @: > web/api/queries/max/$(DEPDIR)/$(am__dirstamp) -web/api/queries/max/max.$(OBJEXT): \ - web/api/queries/max/$(am__dirstamp) \ - web/api/queries/max/$(DEPDIR)/$(am__dirstamp) -web/api/queries/median/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/median - @: > web/api/queries/median/$(am__dirstamp) -web/api/queries/median/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/median/$(DEPDIR) - @: > 
web/api/queries/median/$(DEPDIR)/$(am__dirstamp) -web/api/queries/median/median.$(OBJEXT): \ - web/api/queries/median/$(am__dirstamp) \ - web/api/queries/median/$(DEPDIR)/$(am__dirstamp) -web/api/queries/min/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/min - @: > web/api/queries/min/$(am__dirstamp) -web/api/queries/min/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/min/$(DEPDIR) - @: > web/api/queries/min/$(DEPDIR)/$(am__dirstamp) -web/api/queries/min/min.$(OBJEXT): \ - web/api/queries/min/$(am__dirstamp) \ - web/api/queries/min/$(DEPDIR)/$(am__dirstamp) -web/api/queries/$(am__dirstamp): - @$(MKDIR_P) web/api/queries - @: > web/api/queries/$(am__dirstamp) -web/api/queries/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/$(DEPDIR) - @: > web/api/queries/$(DEPDIR)/$(am__dirstamp) -web/api/queries/query.$(OBJEXT): web/api/queries/$(am__dirstamp) \ - web/api/queries/$(DEPDIR)/$(am__dirstamp) -web/api/queries/rrdr.$(OBJEXT): web/api/queries/$(am__dirstamp) \ - web/api/queries/$(DEPDIR)/$(am__dirstamp) -web/api/queries/ses/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/ses - @: > web/api/queries/ses/$(am__dirstamp) -web/api/queries/ses/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/ses/$(DEPDIR) - @: > web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) -web/api/queries/ses/ses.$(OBJEXT): \ - web/api/queries/ses/$(am__dirstamp) \ - web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) -web/api/queries/stddev/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/stddev - @: > web/api/queries/stddev/$(am__dirstamp) -web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/stddev/$(DEPDIR) - @: > web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) -web/api/queries/stddev/stddev.$(OBJEXT): \ - web/api/queries/stddev/$(am__dirstamp) \ - web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) -web/api/queries/sum/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/sum - @: > web/api/queries/sum/$(am__dirstamp) -web/api/queries/sum/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/queries/sum/$(DEPDIR) - @: > web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) -web/api/queries/sum/sum.$(OBJEXT): \ - web/api/queries/sum/$(am__dirstamp) \ - web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters - @: > web/api/formatters/$(am__dirstamp) -web/api/formatters/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/$(DEPDIR) - @: > web/api/formatters/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/rrd2json.$(OBJEXT): \ - web/api/formatters/$(am__dirstamp) \ - web/api/formatters/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/csv/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/csv - @: > web/api/formatters/csv/$(am__dirstamp) -web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/csv/$(DEPDIR) - @: > web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/csv/csv.$(OBJEXT): \ - web/api/formatters/csv/$(am__dirstamp) \ - web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/json/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/json - @: > web/api/formatters/json/$(am__dirstamp) -web/api/formatters/json/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/json/$(DEPDIR) - @: > web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/json/json.$(OBJEXT): \ - web/api/formatters/json/$(am__dirstamp) \ - web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/ssv/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/ssv - @: > 
web/api/formatters/ssv/$(am__dirstamp) -web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/ssv/$(DEPDIR) - @: > web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/ssv/ssv.$(OBJEXT): \ - web/api/formatters/ssv/$(am__dirstamp) \ - web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/value/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/value - @: > web/api/formatters/value/$(am__dirstamp) -web/api/formatters/value/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/formatters/value/$(DEPDIR) - @: > web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/value/value.$(OBJEXT): \ - web/api/formatters/value/$(am__dirstamp) \ - web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/json_wrapper.$(OBJEXT): \ - web/api/formatters/$(am__dirstamp) \ - web/api/formatters/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/charts2json.$(OBJEXT): \ - web/api/formatters/$(am__dirstamp) \ - web/api/formatters/$(DEPDIR)/$(am__dirstamp) -web/api/formatters/rrdset2json.$(OBJEXT): \ - web/api/formatters/$(am__dirstamp) \ - web/api/formatters/$(DEPDIR)/$(am__dirstamp) -web/api/$(am__dirstamp): - @$(MKDIR_P) web/api - @: > web/api/$(am__dirstamp) -web/api/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/api/$(DEPDIR) - @: > web/api/$(DEPDIR)/$(am__dirstamp) -web/api/web_api_v1.$(OBJEXT): web/api/$(am__dirstamp) \ - web/api/$(DEPDIR)/$(am__dirstamp) -backends/$(am__dirstamp): - @$(MKDIR_P) backends - @: > backends/$(am__dirstamp) -backends/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) backends/$(DEPDIR) - @: > backends/$(DEPDIR)/$(am__dirstamp) -backends/backends.$(OBJEXT): backends/$(am__dirstamp) \ - backends/$(DEPDIR)/$(am__dirstamp) -backends/graphite/$(am__dirstamp): - @$(MKDIR_P) backends/graphite - @: > backends/graphite/$(am__dirstamp) -backends/graphite/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) backends/graphite/$(DEPDIR) - @: > backends/graphite/$(DEPDIR)/$(am__dirstamp) -backends/graphite/graphite.$(OBJEXT): \ - backends/graphite/$(am__dirstamp) \ - backends/graphite/$(DEPDIR)/$(am__dirstamp) -backends/json/$(am__dirstamp): - @$(MKDIR_P) backends/json - @: > backends/json/$(am__dirstamp) -backends/json/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) backends/json/$(DEPDIR) - @: > backends/json/$(DEPDIR)/$(am__dirstamp) -backends/json/json.$(OBJEXT): backends/json/$(am__dirstamp) \ - backends/json/$(DEPDIR)/$(am__dirstamp) -backends/opentsdb/$(am__dirstamp): - @$(MKDIR_P) backends/opentsdb - @: > backends/opentsdb/$(am__dirstamp) -backends/opentsdb/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) backends/opentsdb/$(DEPDIR) - @: > backends/opentsdb/$(DEPDIR)/$(am__dirstamp) -backends/opentsdb/opentsdb.$(OBJEXT): \ - backends/opentsdb/$(am__dirstamp) \ - backends/opentsdb/$(DEPDIR)/$(am__dirstamp) -backends/prometheus/$(am__dirstamp): - @$(MKDIR_P) backends/prometheus - @: > backends/prometheus/$(am__dirstamp) -backends/prometheus/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) backends/prometheus/$(DEPDIR) - @: > backends/prometheus/$(DEPDIR)/$(am__dirstamp) -backends/prometheus/backend_prometheus.$(OBJEXT): \ - backends/prometheus/$(am__dirstamp) \ - backends/prometheus/$(DEPDIR)/$(am__dirstamp) -collectors/checks.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/checks.plugin - @: > collectors/checks.plugin/$(am__dirstamp) -collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/checks.plugin/$(DEPDIR) - @: > collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/checks.plugin/plugin_checks.$(OBJEXT): \ - 
collectors/checks.plugin/$(am__dirstamp) \ - collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp) -health/$(am__dirstamp): - @$(MKDIR_P) health - @: > health/$(am__dirstamp) -health/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) health/$(DEPDIR) - @: > health/$(DEPDIR)/$(am__dirstamp) -health/health.$(OBJEXT): health/$(am__dirstamp) \ - health/$(DEPDIR)/$(am__dirstamp) -health/health_config.$(OBJEXT): health/$(am__dirstamp) \ - health/$(DEPDIR)/$(am__dirstamp) -health/health_json.$(OBJEXT): health/$(am__dirstamp) \ - health/$(DEPDIR)/$(am__dirstamp) -health/health_log.$(OBJEXT): health/$(am__dirstamp) \ - health/$(DEPDIR)/$(am__dirstamp) -collectors/idlejitter.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/idlejitter.plugin - @: > collectors/idlejitter.plugin/$(am__dirstamp) -collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/idlejitter.plugin/$(DEPDIR) - @: > collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/idlejitter.plugin/plugin_idlejitter.$(OBJEXT): \ - collectors/idlejitter.plugin/$(am__dirstamp) \ - collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/plugins.d/$(am__dirstamp): - @$(MKDIR_P) collectors/plugins.d - @: > collectors/plugins.d/$(am__dirstamp) -collectors/plugins.d/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/plugins.d/$(DEPDIR) - @: > collectors/plugins.d/$(DEPDIR)/$(am__dirstamp) -collectors/plugins.d/plugins_d.$(OBJEXT): \ - collectors/plugins.d/$(am__dirstamp) \ - collectors/plugins.d/$(DEPDIR)/$(am__dirstamp) -registry/$(am__dirstamp): - @$(MKDIR_P) registry - @: > registry/$(am__dirstamp) -registry/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) registry/$(DEPDIR) - @: > registry/$(DEPDIR)/$(am__dirstamp) -registry/registry.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_db.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_init.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_internals.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_log.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_machine.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_person.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -registry/registry_url.$(OBJEXT): registry/$(am__dirstamp) \ - registry/$(DEPDIR)/$(am__dirstamp) -database/$(am__dirstamp): - @$(MKDIR_P) database - @: > database/$(am__dirstamp) -database/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) database/$(DEPDIR) - @: > database/$(DEPDIR)/$(am__dirstamp) -database/rrdcalc.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrdcalctemplate.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrddim.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrddimvar.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrdfamily.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrdhost.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrd.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrdset.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -database/rrdsetvar.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) 
-database/rrdvar.$(OBJEXT): database/$(am__dirstamp) \ - database/$(DEPDIR)/$(am__dirstamp) -streaming/$(am__dirstamp): - @$(MKDIR_P) streaming - @: > streaming/$(am__dirstamp) -streaming/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) streaming/$(DEPDIR) - @: > streaming/$(DEPDIR)/$(am__dirstamp) -streaming/rrdpush.$(OBJEXT): streaming/$(am__dirstamp) \ - streaming/$(DEPDIR)/$(am__dirstamp) -collectors/statsd.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/statsd.plugin - @: > collectors/statsd.plugin/$(am__dirstamp) -collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/statsd.plugin/$(DEPDIR) - @: > collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/statsd.plugin/statsd.$(OBJEXT): \ - collectors/statsd.plugin/$(am__dirstamp) \ - collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp) -web/server/$(am__dirstamp): - @$(MKDIR_P) web/server - @: > web/server/$(am__dirstamp) -web/server/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/server/$(DEPDIR) - @: > web/server/$(DEPDIR)/$(am__dirstamp) -web/server/web_client.$(OBJEXT): web/server/$(am__dirstamp) \ - web/server/$(DEPDIR)/$(am__dirstamp) -web/server/web_server.$(OBJEXT): web/server/$(am__dirstamp) \ - web/server/$(DEPDIR)/$(am__dirstamp) -web/server/web_client_cache.$(OBJEXT): web/server/$(am__dirstamp) \ - web/server/$(DEPDIR)/$(am__dirstamp) -web/server/single/$(am__dirstamp): - @$(MKDIR_P) web/server/single - @: > web/server/single/$(am__dirstamp) -web/server/single/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/server/single/$(DEPDIR) - @: > web/server/single/$(DEPDIR)/$(am__dirstamp) -web/server/single/single-threaded.$(OBJEXT): \ - web/server/single/$(am__dirstamp) \ - web/server/single/$(DEPDIR)/$(am__dirstamp) -web/server/multi/$(am__dirstamp): - @$(MKDIR_P) web/server/multi - @: > web/server/multi/$(am__dirstamp) -web/server/multi/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/server/multi/$(DEPDIR) - @: > web/server/multi/$(DEPDIR)/$(am__dirstamp) -web/server/multi/multi-threaded.$(OBJEXT): \ - web/server/multi/$(am__dirstamp) \ - web/server/multi/$(DEPDIR)/$(am__dirstamp) -web/server/static/$(am__dirstamp): - @$(MKDIR_P) web/server/static - @: > web/server/static/$(am__dirstamp) -web/server/static/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) web/server/static/$(DEPDIR) - @: > web/server/static/$(DEPDIR)/$(am__dirstamp) -web/server/static/static-threaded.$(OBJEXT): \ - web/server/static/$(am__dirstamp) \ - web/server/static/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/freebsd.plugin - @: > collectors/freebsd.plugin/$(am__dirstamp) -collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/freebsd.plugin/$(DEPDIR) - @: > collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/plugin_freebsd.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/freebsd_sysctl.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/freebsd_getmntinfo.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/freebsd_getifaddrs.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/freebsd_devstat.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) 
-collectors/freebsd.plugin/freebsd_kstat_zfs.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/freebsd.plugin/freebsd_ipfw.$(OBJEXT): \ - collectors/freebsd.plugin/$(am__dirstamp) \ - collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/proc.plugin - @: > collectors/proc.plugin/$(am__dirstamp) -collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/proc.plugin/$(DEPDIR) - @: > collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/zfs_common.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/macos.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/macos.plugin - @: > collectors/macos.plugin/$(am__dirstamp) -collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/macos.plugin/$(DEPDIR) - @: > collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/macos.plugin/plugin_macos.$(OBJEXT): \ - collectors/macos.plugin/$(am__dirstamp) \ - collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/macos.plugin/macos_sysctl.$(OBJEXT): \ - collectors/macos.plugin/$(am__dirstamp) \ - collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/macos.plugin/macos_mach_smi.$(OBJEXT): \ - collectors/macos.plugin/$(am__dirstamp) \ - collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/macos.plugin/macos_fw.$(OBJEXT): \ - collectors/macos.plugin/$(am__dirstamp) \ - collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/cgroups.plugin/sys_fs_cgroup.$(OBJEXT): \ - collectors/cgroups.plugin/$(am__dirstamp) \ - collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/diskspace.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/diskspace.plugin - @: > collectors/diskspace.plugin/$(am__dirstamp) -collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/diskspace.plugin/$(DEPDIR) - @: > collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/diskspace.plugin/plugin_diskspace.$(OBJEXT): \ - collectors/diskspace.plugin/$(am__dirstamp) \ - collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/nfacct.plugin/$(am__dirstamp): - @$(MKDIR_P) collectors/nfacct.plugin - @: > collectors/nfacct.plugin/$(am__dirstamp) -collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp): - @$(MKDIR_P) collectors/nfacct.plugin/$(DEPDIR) - @: > collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/nfacct.plugin/plugin_nfacct.$(OBJEXT): \ - collectors/nfacct.plugin/$(am__dirstamp) \ - collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/ipc.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/plugin_proc.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_diskstats.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_interrupts.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_softirqs.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_loadavg.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) 
-collectors/proc.plugin/proc_meminfo.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_dev.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_ip_vs_stats.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_netstat.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_rpc_nfs.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_rpc_nfsd.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_snmp.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_snmp6.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_sctp_snmp.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_sockstat.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_sockstat6.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_softnet_stat.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_stat_conntrack.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_net_stat_synproxy.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_self_mountinfo.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_spl_kstat_zfs.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_stat.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_vmstat.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/proc_uptime.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/sys_kernel_mm_ksm.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/sys_devices_system_edac_mc.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/sys_devices_system_node.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp) -collectors/proc.plugin/sys_fs_btrfs.$(OBJEXT): \ - collectors/proc.plugin/$(am__dirstamp) \ - 
collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
-collectors/tc.plugin/$(am__dirstamp):
-	@$(MKDIR_P) collectors/tc.plugin
-	@: > collectors/tc.plugin/$(am__dirstamp)
-collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp):
-	@$(MKDIR_P) collectors/tc.plugin/$(DEPDIR)
-	@: > collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
-collectors/tc.plugin/plugin_tc.$(OBJEXT): \
-	collectors/tc.plugin/$(am__dirstamp) \
-	collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
-
-netdata$(EXEEXT): $(netdata_OBJECTS) $(netdata_DEPENDENCIES) $(EXTRA_netdata_DEPENDENCIES)
-	@rm -f netdata$(EXEEXT)
-	$(AM_V_CCLD)$(LINK) $(netdata_OBJECTS) $(netdata_LDADD) $(LIBS)
-
-mostlyclean-compile:
-	-rm -f *.$(OBJEXT)
-	-rm -f backends/*.$(OBJEXT)
-	-rm -f backends/graphite/*.$(OBJEXT)
-	-rm -f backends/json/*.$(OBJEXT)
-	-rm -f backends/opentsdb/*.$(OBJEXT)
-	-rm -f backends/prometheus/*.$(OBJEXT)
-	-rm -f collectors/apps.plugin/*.$(OBJEXT)
-	-rm -f collectors/cgroups.plugin/*.$(OBJEXT)
-	-rm -f collectors/checks.plugin/*.$(OBJEXT)
-	-rm -f collectors/diskspace.plugin/*.$(OBJEXT)
-	-rm -f collectors/freebsd.plugin/*.$(OBJEXT)
-	-rm -f collectors/freeipmi.plugin/*.$(OBJEXT)
-	-rm -f collectors/idlejitter.plugin/*.$(OBJEXT)
-	-rm -f collectors/macos.plugin/*.$(OBJEXT)
-	-rm -f collectors/nfacct.plugin/*.$(OBJEXT)
-	-rm -f collectors/plugins.d/*.$(OBJEXT)
-	-rm -f collectors/proc.plugin/*.$(OBJEXT)
-	-rm -f collectors/statsd.plugin/*.$(OBJEXT)
-	-rm -f collectors/tc.plugin/*.$(OBJEXT)
-	-rm -f daemon/*.$(OBJEXT)
-	-rm -f database/*.$(OBJEXT)
-	-rm -f health/*.$(OBJEXT)
-	-rm -f libnetdata/*.$(OBJEXT)
-	-rm -f libnetdata/adaptive_resortable_list/*.$(OBJEXT)
-	-rm -f libnetdata/avl/*.$(OBJEXT)
-	-rm -f libnetdata/buffer/*.$(OBJEXT)
-	-rm -f libnetdata/clocks/*.$(OBJEXT)
-	-rm -f libnetdata/config/*.$(OBJEXT)
-	-rm -f libnetdata/dictionary/*.$(OBJEXT)
-	-rm -f libnetdata/eval/*.$(OBJEXT)
-	-rm -f libnetdata/locks/*.$(OBJEXT)
-	-rm -f libnetdata/log/*.$(OBJEXT)
-	-rm -f libnetdata/popen/*.$(OBJEXT)
-	-rm -f libnetdata/procfile/*.$(OBJEXT)
-	-rm -f libnetdata/simple_pattern/*.$(OBJEXT)
-	-rm -f libnetdata/socket/*.$(OBJEXT)
-	-rm -f libnetdata/statistical/*.$(OBJEXT)
-	-rm -f libnetdata/storage_number/*.$(OBJEXT)
-	-rm -f libnetdata/threads/*.$(OBJEXT)
-	-rm -f libnetdata/url/*.$(OBJEXT)
-	-rm -f registry/*.$(OBJEXT)
-	-rm -f streaming/*.$(OBJEXT)
-	-rm -f web/api/*.$(OBJEXT)
-	-rm -f web/api/badges/*.$(OBJEXT)
-	-rm -f web/api/exporters/*.$(OBJEXT)
-	-rm -f web/api/exporters/shell/*.$(OBJEXT)
-	-rm -f web/api/formatters/*.$(OBJEXT)
-	-rm -f web/api/formatters/csv/*.$(OBJEXT)
-	-rm -f web/api/formatters/json/*.$(OBJEXT)
-	-rm -f web/api/formatters/ssv/*.$(OBJEXT)
-	-rm -f web/api/formatters/value/*.$(OBJEXT)
-	-rm -f web/api/queries/*.$(OBJEXT)
-	-rm -f web/api/queries/average/*.$(OBJEXT)
-	-rm -f web/api/queries/des/*.$(OBJEXT)
-	-rm -f web/api/queries/incremental_sum/*.$(OBJEXT)
-	-rm -f web/api/queries/max/*.$(OBJEXT)
-	-rm -f web/api/queries/median/*.$(OBJEXT)
-	-rm -f web/api/queries/min/*.$(OBJEXT)
-	-rm -f web/api/queries/ses/*.$(OBJEXT)
-	-rm -f web/api/queries/stddev/*.$(OBJEXT)
-	-rm -f web/api/queries/sum/*.$(OBJEXT)
-	-rm -f web/server/*.$(OBJEXT)
-	-rm -f web/server/multi/*.$(OBJEXT)
-	-rm -f web/server/single/*.$(OBJEXT)
-	-rm -f web/server/static/*.$(OBJEXT)
-
-distclean-compile:
-	-rm -f *.tab.c
-
-@AMDEP_TRUE@@am__include@ @am__quote@backends/$(DEPDIR)/backends.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@backends/graphite/$(DEPDIR)/graphite.Po@am__quote@
-@AMDEP_TRUE@@am__include@
@am__quote@backends/json/$(DEPDIR)/json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@backends/opentsdb/$(DEPDIR)/opentsdb.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@backends/prometheus/$(DEPDIR)/backend_prometheus.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/apps.plugin/$(DEPDIR)/apps_plugin.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/cgroup-network.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/cgroups.plugin/$(DEPDIR)/sys_fs_cgroup.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/checks.plugin/$(DEPDIR)/plugin_checks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/diskspace.plugin/$(DEPDIR)/plugin_diskspace.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_devstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getifaddrs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_getmntinfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_ipfw.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_kstat_zfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/freebsd_sysctl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freebsd.plugin/$(DEPDIR)/plugin_freebsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/freeipmi.plugin/$(DEPDIR)/freeipmi_plugin.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/idlejitter.plugin/$(DEPDIR)/plugin_idlejitter.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_fw.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_mach_smi.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/macos_sysctl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/macos.plugin/$(DEPDIR)/plugin_macos.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/nfacct.plugin/$(DEPDIR)/plugin_nfacct.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/plugins.d/$(DEPDIR)/plugins_d.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/ipc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/plugin_proc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_diskstats.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_interrupts.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_loadavg.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_meminfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_dev.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_ip_vs_stats.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_netstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_rpc_nfsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sctp_snmp.Po@am__quote@ -@AMDEP_TRUE@@am__include@ 
@am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_snmp6.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_sockstat6.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_softnet_stat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_conntrack.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_net_stat_synproxy.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_self_mountinfo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_softirqs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_spl_kstat_zfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_stat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_sys_kernel_random_entropy_avail.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_uptime.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/proc_vmstat.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_edac_mc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_devices_system_node.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_fs_btrfs.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/sys_kernel_mm_ksm.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/proc.plugin/$(DEPDIR)/zfs_common.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/statsd.plugin/$(DEPDIR)/statsd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@collectors/tc.plugin/$(DEPDIR)/plugin_tc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/common.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/daemon.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/global_statistics.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/main.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/signals.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@daemon/$(DEPDIR)/unit_test.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrd.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalc.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdcalctemplate.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddim.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrddimvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdfamily.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdhost.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdset.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdsetvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@database/$(DEPDIR)/rrdvar.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_config.Po@am__quote@ 
-@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@health/$(DEPDIR)/health_log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/libnetdata.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/$(DEPDIR)/os.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/adaptive_resortable_list/$(DEPDIR)/adaptive_resortable_list.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/avl/$(DEPDIR)/avl.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/buffer/$(DEPDIR)/buffer.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/clocks/$(DEPDIR)/clocks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/config/$(DEPDIR)/appconfig.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/dictionary/$(DEPDIR)/dictionary.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/eval/$(DEPDIR)/eval.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/locks/$(DEPDIR)/locks.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/log/$(DEPDIR)/log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/popen/$(DEPDIR)/popen.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/procfile/$(DEPDIR)/procfile.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/simple_pattern/$(DEPDIR)/simple_pattern.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/socket/$(DEPDIR)/socket.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/statistical/$(DEPDIR)/statistical.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/storage_number/$(DEPDIR)/storage_number.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/threads/$(DEPDIR)/threads.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@libnetdata/url/$(DEPDIR)/url.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_db.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_init.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_internals.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_log.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_machine.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_person.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@registry/$(DEPDIR)/registry_url.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@streaming/$(DEPDIR)/rrdpush.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/$(DEPDIR)/web_api_v1.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/badges/$(DEPDIR)/web_buffer_svg.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/$(DEPDIR)/allmetrics.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/exporters/shell/$(DEPDIR)/allmetrics_shell.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/charts2json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/json_wrapper.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrd2json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/$(DEPDIR)/rrdset2json.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/csv/$(DEPDIR)/csv.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/json/$(DEPDIR)/json.Po@am__quote@ 
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/ssv/$(DEPDIR)/ssv.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/formatters/value/$(DEPDIR)/value.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/query.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/$(DEPDIR)/rrdr.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/average/$(DEPDIR)/average.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/des/$(DEPDIR)/des.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/incremental_sum/$(DEPDIR)/incremental_sum.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/max/$(DEPDIR)/max.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/median/$(DEPDIR)/median.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/min/$(DEPDIR)/min.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/ses/$(DEPDIR)/ses.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/stddev/$(DEPDIR)/stddev.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/api/queries/sum/$(DEPDIR)/sum.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_client_cache.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/$(DEPDIR)/web_server.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/multi/$(DEPDIR)/multi-threaded.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/single/$(DEPDIR)/single-threaded.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@web/server/static/$(DEPDIR)/static-threaded.Po@am__quote@
-
-.c.o:
-@am__fastdepCC_TRUE@	$(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
-@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
-@am__fastdepCC_TRUE@	$(am__mv) $$depbase.Tpo $$depbase.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
-
-.c.obj:
-@am__fastdepCC_TRUE@	$(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
-@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
-@am__fastdepCC_TRUE@	$(am__mv) $$depbase.Tpo $$depbase.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
-install-dist_cacheDATA: $(dist_cache_DATA)
-	@$(NORMAL_INSTALL)
-	@list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \
-	if test -n "$$list"; then \
-	  echo " $(MKDIR_P) '$(DESTDIR)$(cachedir)'"; \
-	  $(MKDIR_P) "$(DESTDIR)$(cachedir)" || exit 1; \
-	fi; \
-	for p in $$list; do \
-	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
-	  echo "$$d$$p"; \
-	done | $(am__base_list) | \
-	while read files; do \
-	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(cachedir)'"; \
-	  $(INSTALL_DATA) $$files "$(DESTDIR)$(cachedir)" || exit $$?; \
-	done
-
-uninstall-dist_cacheDATA:
-	@$(NORMAL_UNINSTALL)
-	@list='$(dist_cache_DATA)'; test -n "$(cachedir)" || list=; \
-	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
-	dir='$(DESTDIR)$(cachedir)'; $(am__uninstall_files_from_dir)
-install-dist_logDATA: $(dist_log_DATA)
-	@$(NORMAL_INSTALL)
-	@list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \
-	if test -n "$$list"; then \
-	  echo " $(MKDIR_P) '$(DESTDIR)$(logdir)'"; \
-	  $(MKDIR_P) "$(DESTDIR)$(logdir)" || exit 1; \
-	fi; \
-	for p in $$list; do \
-	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
-	  echo "$$d$$p"; \
-	done | $(am__base_list) | \
-	while read files; do \
-	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(logdir)'"; \
-	  $(INSTALL_DATA) $$files "$(DESTDIR)$(logdir)" || exit $$?; \
-	done
-
-uninstall-dist_logDATA:
-	@$(NORMAL_UNINSTALL)
-	@list='$(dist_log_DATA)'; test -n "$(logdir)" || list=; \
-	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
-	dir='$(DESTDIR)$(logdir)'; $(am__uninstall_files_from_dir)
-install-dist_registryDATA: $(dist_registry_DATA)
-	@$(NORMAL_INSTALL)
-	@list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \
-	if test -n "$$list"; then \
-	  echo " $(MKDIR_P) '$(DESTDIR)$(registrydir)'"; \
-	  $(MKDIR_P) "$(DESTDIR)$(registrydir)" || exit 1; \
-	fi; \
-	for p in $$list; do \
-	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
-	  echo "$$d$$p"; \
-	done | $(am__base_list) | \
-	while read files; do \
-	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(registrydir)'"; \
-	  $(INSTALL_DATA) $$files "$(DESTDIR)$(registrydir)" || exit $$?; \
-	done
-
-uninstall-dist_registryDATA:
-	@$(NORMAL_UNINSTALL)
-	@list='$(dist_registry_DATA)'; test -n "$(registrydir)" || list=; \
-	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
-	dir='$(DESTDIR)$(registrydir)'; $(am__uninstall_files_from_dir)
-install-dist_varlibDATA: $(dist_varlib_DATA)
-	@$(NORMAL_INSTALL)
-	@list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \
-	if test -n "$$list"; then \
-	  echo " $(MKDIR_P) '$(DESTDIR)$(varlibdir)'"; \
-	  $(MKDIR_P) "$(DESTDIR)$(varlibdir)" || exit 1; \
-	fi; \
-	for p in $$list; do \
-	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
-	  echo "$$d$$p"; \
-	done | $(am__base_list) | \
-	while read files; do \
-	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(varlibdir)'"; \
-	  $(INSTALL_DATA) $$files "$(DESTDIR)$(varlibdir)" || exit $$?; \
-	done
-
-uninstall-dist_varlibDATA:
-	@$(NORMAL_UNINSTALL)
-	@list='$(dist_varlib_DATA)'; test -n "$(varlibdir)" || list=; \
-	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
-	dir='$(DESTDIR)$(varlibdir)'; $(am__uninstall_files_from_dir)
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run 'make' without going through this Makefile.
-# To change the values of 'make' variables: instead of editing Makefiles,
-# (1) if the variable is set in 'config.status', edit 'config.status'
-#     (which will cause the Makefiles to be regenerated when you run 'make');
-# (2) otherwise, pass the desired values on the 'make' command line.
-$(am__recursive_targets):
-	@fail=; \
-	if $(am__make_keepgoing); then \
-	  failcom='fail=yes'; \
-	else \
-	  failcom='exit 1'; \
-	fi; \
-	dot_seen=no; \
-	target=`echo $@ | sed s/-recursive//`; \
-	case "$@" in \
-	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
-	  *) list='$(SUBDIRS)' ;; \
-	esac; \
-	for subdir in $$list; do \
-	  echo "Making $$target in $$subdir"; \
-	  if test "$$subdir" = "."; then \
-	    dot_seen=yes; \
-	    local_target="$$target-am"; \
-	  else \
-	    local_target="$$target"; \
-	  fi; \
-	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-	  || eval $$failcom; \
-	done; \
-	if test "$$dot_seen" = "no"; then \
-	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
-	fi; test -z "$$fail"
-
-ID: $(am__tagged_files)
-	$(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-recursive
-TAGS: tags
-
-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
-	set x; \
-	here=`pwd`; \
-	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
-	  include_option=--etags-include; \
-	  empty_fix=.; \
-	else \
-	  include_option=--include; \
-	  empty_fix=; \
-	fi; \
-	list='$(SUBDIRS)'; for subdir in $$list; do \
-	  if test "$$subdir" = .; then :; else \
-	    test ! -f $$subdir/TAGS || \
-	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
-	  fi; \
-	done; \
-	$(am__define_uniq_tagged_files); \
-	shift; \
-	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
-	  test -n "$$unique" || unique=$$empty_fix; \
-	  if test $$# -gt 0; then \
-	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
-	      "$$@" $$unique; \
-	  else \
-	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
-	      $$unique; \
-	  fi; \
-	fi
-ctags: ctags-recursive
-
-CTAGS: ctags
-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
-	$(am__define_uniq_tagged_files); \
-	test -z "$(CTAGS_ARGS)$$unique" \
-	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
-	     $$unique
-
-GTAGS:
-	here=`$(am__cd) $(top_builddir) && pwd` \
-	  && $(am__cd) $(top_srcdir) \
-	  && gtags -i $(GTAGS_ARGS) "$$here"
-cscope: cscope.files
-	test ! -s cscope.files \
-	  || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
-clean-cscope:
-	-rm -f cscope.files
-cscope.files: clean-cscope cscopelist
-cscopelist: cscopelist-recursive
-
-cscopelist-am: $(am__tagged_files)
-	list='$(am__tagged_files)'; \
-	case "$(srcdir)" in \
-	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
-	  *) sdir=$(subdir)/$(srcdir) ;; \
-	esac; \
-	for i in $$list; do \
-	  if test -f "$$i"; then \
-	    echo "$(subdir)/$$i"; \
-	  else \
-	    echo "$$sdir/$$i"; \
-	  fi; \
-	done >> $(top_builddir)/cscope.files
-
-distclean-tags:
-	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-	-rm -f cscope.out cscope.in.out cscope.po.out cscope.files
-
-distdir: $(DISTFILES)
-	$(am__remove_distdir)
-	test -d "$(distdir)" || mkdir "$(distdir)"
-	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
-	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
-	list='$(DISTFILES)'; \
-	  dist_files=`for file in $$list; do echo $$file; done | \
-	  sed -e "s|^$$srcdirstrip/||;t" \
-	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
-	case $$dist_files in \
-	  */*) $(MKDIR_P) `echo "$$dist_files" | \
-		   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
-		   sort -u` ;; \
-	esac; \
-	for file in $$dist_files; do \
-	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-	  if test -d $$d/$$file; then \
-	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
-	    if test -d "$(distdir)/$$file"; then \
-	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
-	    fi; \
-	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
-	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
-	    fi; \
-	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
-	  else \
-	    test -f "$(distdir)/$$file" \
-	    || cp -p $$d/$$file "$(distdir)/$$file" \
-	    || exit 1; \
-	  fi; \
-	done
-	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
-	  if test "$$subdir" = .; then :; else \
-	    $(am__make_dryrun) \
-	      || test -d "$(distdir)/$$subdir" \
-	      || $(MKDIR_P) "$(distdir)/$$subdir" \
-	      || exit 1; \
-	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
-	    $(am__relativize); \
-	    new_distdir=$$reldir; \
-	    dir1=$$subdir; dir2="$(top_distdir)"; \
-	    $(am__relativize); \
-	    new_top_distdir=$$reldir; \
-	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
-	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
-	    ($(am__cd) $$subdir && \
-	      $(MAKE) $(AM_MAKEFLAGS) \
-	        top_distdir="$$new_top_distdir" \
-	        distdir="$$new_distdir" \
-		am__remove_distdir=: \
-		am__skip_length_check=: \
-		am__skip_mode_fix=: \
-	        distdir) \
-	      || exit 1; \
-	  fi; \
-	done
-	-test -n "$(am__skip_mode_fix)" \
-	|| find "$(distdir)" -type d ! -perm -755 \
-		-exec chmod u+rwx,go+rx {} \; -o \
-	  ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
-	  ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
-	  ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
-	|| chmod -R a+r "$(distdir)"
-dist-gzip: distdir
-	tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
-	$(am__post_remove_distdir)
-
-dist-bzip2: distdir
-	tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
-	$(am__post_remove_distdir)
-
-dist-lzip: distdir
-	tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
-	$(am__post_remove_distdir)
-
-dist-xz: distdir
-	tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
-	$(am__post_remove_distdir)
-
-dist-tarZ: distdir
-	@echo WARNING: "Support for distribution archives compressed with" \
-		       "legacy program 'compress' is deprecated." >&2
-	@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
-	tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
-	$(am__post_remove_distdir)
-
-dist-shar: distdir
-	@echo WARNING: "Support for shar distribution archives is" \
-	               "deprecated." >&2
-	@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
-	shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
-	$(am__post_remove_distdir)
-
-dist-zip: distdir
-	-rm -f $(distdir).zip
-	zip -rq $(distdir).zip $(distdir)
-	$(am__post_remove_distdir)
-
-dist dist-all:
-	$(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
-	$(am__post_remove_distdir)
-
-# This target untars the dist file and tries a VPATH configuration. Then
-# it guarantees that the distribution is self-contained by making another
-# tarfile.
-distcheck: dist
-	case '$(DIST_ARCHIVES)' in \
-	*.tar.gz*) \
-	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
-	*.tar.bz2*) \
-	  bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
-	*.tar.lz*) \
-	  lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
-	*.tar.xz*) \
-	  xz -dc $(distdir).tar.xz | $(am__untar) ;;\
-	*.tar.Z*) \
-	  uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
-	*.shar.gz*) \
-	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
-	*.zip*) \
-	  unzip $(distdir).zip ;;\
-	esac
-	chmod -R a-w $(distdir)
-	chmod u+w $(distdir)
-	mkdir $(distdir)/_build $(distdir)/_inst
-	chmod a-w $(distdir)
-	test -d $(distdir)/_build || exit 0; \
-	dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
-	  && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
-	  && am__cwd=`pwd` \
-	  && $(am__cd) $(distdir)/_build \
-	  && ../configure \
-	    $(AM_DISTCHECK_CONFIGURE_FLAGS) \
-	    $(DISTCHECK_CONFIGURE_FLAGS) \
-	    --srcdir=.. --prefix="$$dc_install_base" \
-	  && $(MAKE) $(AM_MAKEFLAGS) \
-	  && $(MAKE) $(AM_MAKEFLAGS) dvi \
-	  && $(MAKE) $(AM_MAKEFLAGS) check \
-	  && $(MAKE) $(AM_MAKEFLAGS) install \
-	  && $(MAKE) $(AM_MAKEFLAGS) installcheck \
-	  && $(MAKE) $(AM_MAKEFLAGS) uninstall \
-	  && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
-	        distuninstallcheck \
-	  && chmod -R a-w "$$dc_install_base" \
-	  && ({ \
-	       (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
-	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
-	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
-	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
-	            distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
-	      } || { rm -rf "$$dc_destdir"; exit 1; }) \
-	  && rm -rf "$$dc_destdir" \
-	  && $(MAKE) $(AM_MAKEFLAGS) dist \
-	  && rm -rf $(DIST_ARCHIVES) \
-	  && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
-	  && cd "$$am__cwd" \
-	  || exit 1
-	$(am__post_remove_distdir)
-	@(echo "$(distdir) archives ready for distribution: "; \
-	  list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
-	  sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
-distuninstallcheck:
-	@test -n '$(distuninstallcheck_dir)' || { \
-	  echo 'ERROR: trying to run $@ with an empty' \
-	       '$$(distuninstallcheck_dir)' >&2; \
-	  exit 1; \
-	}; \
-	$(am__cd) '$(distuninstallcheck_dir)' || { \
-	  echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
-	  exit 1; \
-	}; \
-	test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
-	   || { echo "ERROR: files left after uninstall:" ; \
-	        if test -n "$(DESTDIR)"; then \
-	          echo "  (check DESTDIR support)"; \
-	        fi ; \
-	        $(distuninstallcheck_listfiles) ; \
-	        exit 1; } >&2
-distcleancheck: distclean
-	@if test '$(srcdir)' = .; then \
-	  echo "ERROR: distcleancheck can only run from a VPATH build" ; \
-	  exit 1 ; \
-	fi
-	@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
-	  || { echo "ERROR: files left in build directory after distclean:" ; \
-	       $(distcleancheck_listfiles) ; \
-	       exit 1; } >&2
-check-am: all-am
-check: check-recursive
-all-am: Makefile $(PROGRAMS) $(SCRIPTS) $(DATA) config.h
-installdirs: installdirs-recursive
-installdirs-am:
-	for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(cachedir)" "$(DESTDIR)$(logdir)" "$(DESTDIR)$(registrydir)" "$(DESTDIR)$(varlibdir)"; do \
-	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
-	done
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
-	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
-	if test -z '$(STRIP)'; then \
-	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-	      install; \
-	else \
-	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
-	fi
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-	-rm -f backends/$(DEPDIR)/$(am__dirstamp)
-	-rm -f backends/$(am__dirstamp)
-	-rm -f backends/graphite/$(DEPDIR)/$(am__dirstamp)
-	-rm -f backends/graphite/$(am__dirstamp)
-	-rm -f backends/json/$(DEPDIR)/$(am__dirstamp)
-	-rm -f backends/json/$(am__dirstamp)
-	-rm -f backends/opentsdb/$(DEPDIR)/$(am__dirstamp)
-	-rm -f backends/opentsdb/$(am__dirstamp)
-	-rm -f backends/prometheus/$(DEPDIR)/$(am__dirstamp)
-	-rm -f backends/prometheus/$(am__dirstamp)
-	-rm -f collectors/apps.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/apps.plugin/$(am__dirstamp)
-	-rm -f collectors/cgroups.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/cgroups.plugin/$(am__dirstamp)
-	-rm -f collectors/checks.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/checks.plugin/$(am__dirstamp)
-	-rm -f collectors/diskspace.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/diskspace.plugin/$(am__dirstamp)
-	-rm -f collectors/freebsd.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/freebsd.plugin/$(am__dirstamp)
-	-rm -f collectors/freeipmi.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/freeipmi.plugin/$(am__dirstamp)
-	-rm -f collectors/idlejitter.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/idlejitter.plugin/$(am__dirstamp)
-	-rm -f collectors/macos.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/macos.plugin/$(am__dirstamp)
-	-rm -f collectors/nfacct.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/nfacct.plugin/$(am__dirstamp)
-	-rm -f collectors/plugins.d/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/plugins.d/$(am__dirstamp)
-	-rm -f collectors/proc.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/proc.plugin/$(am__dirstamp)
-	-rm -f collectors/statsd.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/statsd.plugin/$(am__dirstamp)
-	-rm -f collectors/tc.plugin/$(DEPDIR)/$(am__dirstamp)
-	-rm -f collectors/tc.plugin/$(am__dirstamp)
-	-rm -f daemon/$(DEPDIR)/$(am__dirstamp)
-	-rm -f daemon/$(am__dirstamp)
-	-rm -f database/$(DEPDIR)/$(am__dirstamp)
-	-rm -f
database/$(am__dirstamp) - -rm -f health/$(DEPDIR)/$(am__dirstamp) - -rm -f health/$(am__dirstamp) - -rm -f libnetdata/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/$(am__dirstamp) - -rm -f libnetdata/adaptive_resortable_list/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/adaptive_resortable_list/$(am__dirstamp) - -rm -f libnetdata/avl/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/avl/$(am__dirstamp) - -rm -f libnetdata/buffer/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/buffer/$(am__dirstamp) - -rm -f libnetdata/clocks/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/clocks/$(am__dirstamp) - -rm -f libnetdata/config/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/config/$(am__dirstamp) - -rm -f libnetdata/dictionary/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/dictionary/$(am__dirstamp) - -rm -f libnetdata/eval/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/eval/$(am__dirstamp) - -rm -f libnetdata/locks/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/locks/$(am__dirstamp) - -rm -f libnetdata/log/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/log/$(am__dirstamp) - -rm -f libnetdata/popen/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/popen/$(am__dirstamp) - -rm -f libnetdata/procfile/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/procfile/$(am__dirstamp) - -rm -f libnetdata/simple_pattern/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/simple_pattern/$(am__dirstamp) - -rm -f libnetdata/socket/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/socket/$(am__dirstamp) - -rm -f libnetdata/statistical/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/statistical/$(am__dirstamp) - -rm -f libnetdata/storage_number/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/storage_number/$(am__dirstamp) - -rm -f libnetdata/threads/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/threads/$(am__dirstamp) - -rm -f libnetdata/url/$(DEPDIR)/$(am__dirstamp) - -rm -f libnetdata/url/$(am__dirstamp) - -rm -f registry/$(DEPDIR)/$(am__dirstamp) - -rm -f registry/$(am__dirstamp) - -rm -f streaming/$(DEPDIR)/$(am__dirstamp) - -rm -f streaming/$(am__dirstamp) - -rm -f web/api/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/$(am__dirstamp) - -rm -f web/api/badges/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/badges/$(am__dirstamp) - -rm -f web/api/exporters/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/exporters/$(am__dirstamp) - -rm -f web/api/exporters/shell/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/exporters/shell/$(am__dirstamp) - -rm -f web/api/formatters/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/formatters/$(am__dirstamp) - -rm -f web/api/formatters/csv/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/formatters/csv/$(am__dirstamp) - -rm -f web/api/formatters/json/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/formatters/json/$(am__dirstamp) - -rm -f web/api/formatters/ssv/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/formatters/ssv/$(am__dirstamp) - -rm -f web/api/formatters/value/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/formatters/value/$(am__dirstamp) - -rm -f web/api/queries/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/$(am__dirstamp) - -rm -f web/api/queries/average/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/average/$(am__dirstamp) - -rm -f web/api/queries/des/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/des/$(am__dirstamp) - -rm -f web/api/queries/incremental_sum/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/incremental_sum/$(am__dirstamp) - -rm -f web/api/queries/max/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/max/$(am__dirstamp) - -rm -f web/api/queries/median/$(DEPDIR)/$(am__dirstamp) - -rm -f 
web/api/queries/median/$(am__dirstamp) - -rm -f web/api/queries/min/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/min/$(am__dirstamp) - -rm -f web/api/queries/ses/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/ses/$(am__dirstamp) - -rm -f web/api/queries/stddev/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/stddev/$(am__dirstamp) - -rm -f web/api/queries/sum/$(DEPDIR)/$(am__dirstamp) - -rm -f web/api/queries/sum/$(am__dirstamp) - -rm -f web/server/$(DEPDIR)/$(am__dirstamp) - -rm -f web/server/$(am__dirstamp) - -rm -f web/server/multi/$(DEPDIR)/$(am__dirstamp) - -rm -f web/server/multi/$(am__dirstamp) - -rm -f web/server/single/$(DEPDIR)/$(am__dirstamp) - -rm -f web/server/single/$(am__dirstamp) - -rm -f web/server/static/$(DEPDIR)/$(am__dirstamp) - -rm -f web/server/static/$(am__dirstamp) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic clean-pluginsPROGRAMS clean-sbinPROGRAMS \ - mostlyclean-am - -distclean: distclean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf backends/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) collectors/checks.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/multi/$(DEPDIR) web/server/single/$(DEPDIR) web/server/static/$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: install-dist_cacheDATA install-dist_logDATA \ - install-dist_registryDATA install-dist_varlibDATA \ - install-pluginsPROGRAMS - -install-dvi: install-dvi-recursive - -install-dvi-am: - 
-install-exec-am: install-sbinPROGRAMS - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf backends/$(DEPDIR) backends/graphite/$(DEPDIR) backends/json/$(DEPDIR) backends/opentsdb/$(DEPDIR) backends/prometheus/$(DEPDIR) collectors/apps.plugin/$(DEPDIR) collectors/cgroups.plugin/$(DEPDIR) collectors/checks.plugin/$(DEPDIR) collectors/diskspace.plugin/$(DEPDIR) collectors/freebsd.plugin/$(DEPDIR) collectors/freeipmi.plugin/$(DEPDIR) collectors/idlejitter.plugin/$(DEPDIR) collectors/macos.plugin/$(DEPDIR) collectors/nfacct.plugin/$(DEPDIR) collectors/plugins.d/$(DEPDIR) collectors/proc.plugin/$(DEPDIR) collectors/statsd.plugin/$(DEPDIR) collectors/tc.plugin/$(DEPDIR) daemon/$(DEPDIR) database/$(DEPDIR) health/$(DEPDIR) libnetdata/$(DEPDIR) libnetdata/adaptive_resortable_list/$(DEPDIR) libnetdata/avl/$(DEPDIR) libnetdata/buffer/$(DEPDIR) libnetdata/clocks/$(DEPDIR) libnetdata/config/$(DEPDIR) libnetdata/dictionary/$(DEPDIR) libnetdata/eval/$(DEPDIR) libnetdata/locks/$(DEPDIR) libnetdata/log/$(DEPDIR) libnetdata/popen/$(DEPDIR) libnetdata/procfile/$(DEPDIR) libnetdata/simple_pattern/$(DEPDIR) libnetdata/socket/$(DEPDIR) libnetdata/statistical/$(DEPDIR) libnetdata/storage_number/$(DEPDIR) libnetdata/threads/$(DEPDIR) libnetdata/url/$(DEPDIR) registry/$(DEPDIR) streaming/$(DEPDIR) web/api/$(DEPDIR) web/api/badges/$(DEPDIR) web/api/exporters/$(DEPDIR) web/api/exporters/shell/$(DEPDIR) web/api/formatters/$(DEPDIR) web/api/formatters/csv/$(DEPDIR) web/api/formatters/json/$(DEPDIR) web/api/formatters/ssv/$(DEPDIR) web/api/formatters/value/$(DEPDIR) web/api/queries/$(DEPDIR) web/api/queries/average/$(DEPDIR) web/api/queries/des/$(DEPDIR) web/api/queries/incremental_sum/$(DEPDIR) web/api/queries/max/$(DEPDIR) web/api/queries/median/$(DEPDIR) web/api/queries/min/$(DEPDIR) web/api/queries/ses/$(DEPDIR) web/api/queries/stddev/$(DEPDIR) web/api/queries/sum/$(DEPDIR) web/server/$(DEPDIR) web/server/multi/$(DEPDIR) web/server/single/$(DEPDIR) web/server/static/$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-dist_cacheDATA uninstall-dist_logDATA \ - uninstall-dist_registryDATA uninstall-dist_varlibDATA \ - uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS - -.MAKE: $(am__recursive_targets) all install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ - am--refresh check check-am clean clean-cscope clean-generic \ - clean-pluginsPROGRAMS clean-sbinPROGRAMS cscope cscopelist-am \ - ctags ctags-am dist dist-all dist-bzip2 dist-gzip dist-lzip \ - dist-shar dist-tarZ dist-xz dist-zip distcheck distclean \ - distclean-compile distclean-generic distclean-hdr \ - distclean-tags distcleancheck distdir distuninstallcheck dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-dist_cacheDATA \ - install-dist_logDATA install-dist_registryDATA \ - install-dist_varlibDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info 
install-info-am install-man install-pdf \ - install-pdf-am install-pluginsPROGRAMS install-ps \ - install-ps-am install-sbinPROGRAMS install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \ - uninstall-am uninstall-dist_cacheDATA uninstall-dist_logDATA \ - uninstall-dist_registryDATA uninstall-dist_varlibDATA \ - uninstall-pluginsPROGRAMS uninstall-sbinPROGRAMS - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/README.md b/README.md index 5d17627dc..c152a8421 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# netdata [![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() +# netdata [![Build Status](https://travis-ci.com/netdata/netdata.svg?branch=master)](https://travis-ci.com/netdata/netdata) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2231/badge)](https://bestpractices.coreinfrastructure.org/projects/2231) [![License: GPL v3+](https://img.shields.io/badge/License-GPL%20v3%2B-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Freadme&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() [![Code Climate](https://codeclimate.com/github/netdata/netdata/badges/gpa.svg)](https://codeclimate.com/github/netdata/netdata) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a994873f30d045b9b4b83606c3eb3498)](https://www.codacy.com/app/netdata/netdata?utm_source=github.com&utm_medium=referral&utm_content=netdata/netdata&utm_campaign=Badge_Grade) [![LGTM C](https://img.shields.io/lgtm/grade/cpp/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:cpp) [![LGTM JS](https://img.shields.io/lgtm/grade/javascript/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:javascript) [![LGTM PYTHON](https://img.shields.io/lgtm/grade/python/g/netdata/netdata.svg?logo=lgtm)](https://lgtm.com/projects/g/netdata/netdata/context:python) @@ -6,7 +6,7 @@ **Netdata** is **distributed, real-time, performance and health monitoring for systems and applications**. It is a highly optimized monitoring agent you install on all your systems and containers. -Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third party components, or it can be integrated to existing monitoring toolchains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc). 
+Netdata provides **unparalleled insights**, **in real-time**, of everything happening on the systems it runs (including web servers, databases, applications), using **highly interactive web dashboards**. It can run autonomously, without any third party components, or it can be integrated into existing monitoring tool chains (Prometheus, Graphite, OpenTSDB, Kafka, Grafana, etc). _Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._ @@ -14,7 +14,7 @@ Netdata is **free, open-source software** and it currently runs on **Linux**, ** ![cncf](https://www.cncf.io/wp-content/uploads/2016/09/logo_cncf.png) -Netdata is in the [Cloud Native Computing Foundation (CNCF) landscape](https://landscape.cncf.io/grouping=no&sort=stars). +Netdata is in the [Cloud Native Computing Foundation (CNCF) landscape](https://landscape.cncf.io/format=card-mode&grouping=no&sort=stars) and it is the 3rd most starred open-source project. Check the [CNCF TOC Netdata presentation](https://docs.google.com/presentation/d/18C8bCTbtgKDWqPa57GXIjB2PbjjpjsUNkLtZEz6YK8s/edit?usp=sharing). --- @@ -40,9 +40,10 @@ Once you use it on your systems, **there is no going back**! *You have been warn 9. [Visualization](#visualization) - unique visualization features 10. [What does it monitor](#what-does-it-monitor) - which metrics it collects 11. [Documentation](#documentation) - read the docs -12. [Community](#community) - disucss with others and get support +12. [Community](#community) - discuss with others and get support 13. [License](#license) - check the license of netdata - +14. [Is it any good?](#is-it-any-good) - Yes +15. [Is it awesome?](#is-it-awesome) - Yes ## How it looks @@ -69,7 +70,7 @@ We provide docker images for the most common architectures. These are statistics [![netdata/netdata (official)](https://img.shields.io/docker/pulls/netdata/netdata.svg?label=netdata/netdata+%28official%29)](https://hub.docker.com/r/netdata/netdata/) [![firehol/netdata (deprecated)](https://img.shields.io/docker/pulls/firehol/netdata.svg?label=firehol/netdata+%28deprecated%29)](https://hub.docker.com/r/firehol/netdata/) [![titpetric/netdata (donated)](https://img.shields.io/docker/pulls/titpetric/netdata.svg?label=titpetric/netdata+%28third+party%29)](https://hub.docker.com/r/titpetric/netdata/) ### Registry -When you install multiple netdata, they are integrated into **one distributed application**, via a [netdata registry](registry/#netdata-registry). This is a web browser feature and it allows us to count the number of unique users and unique netdata servers installed. +When you install multiple netdata, they are integrated into **one distributed application**, via a [netdata registry](registry/#registry). This is a web browser feature and it allows us to count the number of unique users and unique netdata servers installed.
The following information comes from the global public netdata registry we run: [![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=M&value_color=blue&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=k&divide=1000&value_color=orange&precision=2&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=M&value_color=yellowgreen&precision=2&divide=1000000&v43)](https://registry.my-netdata.io/#menu_netdata_submenu_registry) @@ -91,10 +92,9 @@ bash <(curl -Ss https://my-netdata.io/kickstart.sh) The above command will: 1. install any required packages on your system (it will ask you to confirm before doing so), -2. download netdata source to `/usr/src/netdata.git` -3. compile it, install it and start it +2. compile it, install it and start it -More installation methods and additional options can be found at the [installation page](installer/#installation). +More installation methods and additional options can be found at the [installation page](packaging/installer/#installation). To try netdata in a docker container, run this: @@ -109,7 +109,7 @@ docker run -d --name=netdata \ netdata/netdata ``` -For more information about running netdata in docker, check the [docker installation page](docker/). +For more information about running netdata in docker, check the [docker installation page](packaging/docker/). ![image](https://user-images.githubusercontent.com/2662304/48304090-fd384080-e51b-11e8-80ae-eecb03118dda.png) @@ -147,20 +147,15 @@ not just visualize metrics. ## News -`Nov 6th, 2018` - **[netdata v1.11.0 released!](https://github.com/netdata/netdata/releases)** - - - New query engine, supporting statistical functions. - - Fixed security issues identified by Red4Sec.com and Synacktiv. - - New Data Collection Modules: [`rethinkdbs`](collectors/python.d.plugin/rethinkdbs/), [`proxysql`](collectors/python.d.plugin/proxysql/), [`litespeed`](collectors/python.d.plugin/litespeed/), [`uwsgi`](collectors/python.d.plugin/uwsgi/), [`unbound`](collectors/python.d.plugin/unbound/), [`powerdns`](collectors/python.d.plugin/powerdns/), [`dockerd`](collectors/python.d.plugin/dockerd/), [`puppet`](collectors/python.d.plugin/puppet/), [`logind`](collectors/python.d.plugin/logind/), [`adaptec_raid`](collectors/python.d.plugin/adaptec_raid/), [`megacli`](collectors/python.d.plugin/megacli/), [`spigotmc`](collectors/python.d.plugin/spigotmc/), [`boinc`](collectors/python.d.plugin/boinc/), [`w1sensor`](collectors/python.d.plugin/w1sensor/), [`monit`](collectors/python.d.plugin/monit/), [`linux_power_supplies`](collectors/python.d.plugin/linux_power_supply/).
- - Improved Data Collection Modules: [`statsd.plugin`](collectors/statsd.plugin/), [`apps.plugin`](collectors/apps.plugin/), [`freeipmi.plugin`](collectors/freeipmi.plugin/), [`proc.plugin`](collectors/proc.plugin/), [`diskspace.plugin`](collectors/diskspace.plugin/), [`freebsd.plugin`](collectors/freebsd.plugin/), [`python.d.plugin`](collectors/python.d.plugin/), [`web_log`](collectors/python.d.plugin/web_log/), [`nginx_plus`](collectors/python.d.plugin/nginx_plus/), [`ipfs`](collectors/python.d.plugin/ipfs/), [`fail2ban`](collectors/python.d.plugin/fail2ban/), [`ceph`](collectors/python.d.plugin/ceph/), [`elasticsearch`](collectors/python.d.plugin/elasticsearch/), [`redis`](collectors/python.d.plugin/redis/), - [`beanstalk`](collectors/python.d.plugin/beanstalk/), [`mysql`](collectors/python.d.plugin/mysql/), [`varnish`](collectors/python.d.plugin/varnish/), [`couchdb`](collectors/python.d.plugin/couchdb/), [`phpfpm`](collectors/python.d.plugin/phpfpm/), [`apache`](collectors/python.d.plugin/apache/), [`icecast`](collectors/python.d.plugin/icecast/), [`mongodb`](collectors/python.d.plugin/mongodb/), [`postgress`](collectors/python.d.plugin/postgres/), [`mdstat`](collectors/python.d.plugin/mdstat/), [`openvpn_log`](collectors/python.d.plugin/ovpn_status_log/), [`snmp`](collectors/node.d.plugin/snmp/), [`nut`](collectors/charts.d.plugin/nut/). - - Added alarms for detecting abnormally high load average, `TCP` `SYN` and `TCP` accept queue overflows, network interfaces congestion and alarms for `bcache`, `mdstat`, `apcupsd`, `mysql`. - - system alarms are now enabled on FreeBSD. - - New notification methods: **[rocket.chat](health/notifications/rocketchat/)**, **Microsoft Teams**, **[syslog](health/notifications/syslog/)**, **fleep.io**, **[Amazon SNS](health/notifications/awssns/)**. - - - and dozens more improvements, enhancements, features and compatibility fixes - +`Nov 22nd, 2018` - **[netdata v1.11.1 released!](https://github.com/netdata/netdata/releases)** + +- Improved internal database to support values above 64-bit. +- New data collection plugins: [`openldap`](collectors/python.d.plugin/openldap/), [`tor`](collectors/python.d.plugin/tor/), [`nvidia_smi`](collectors/python.d.plugin/nvidia_smi/). +- Improved data collection plugins: netdata now supports monitoring network interface aliases, [`smartd_log`](collectors/python.d.plugin/smartd_log/), [`cpufreq`](collectors/python.d.plugin/cpufreq/), [`sensors`](collectors/python.d.plugin/sensors/). +- Health monitoring improvements: network interface congestion alarm restored, [`alerta.io`](health/notifications/alerta/), `conntrack_max`. +- `my-netdata` menu has been refactored. +- Packaging: `openrc` service definition got a few improvements. + --- `Sep 18, 2018` - **netdata has its own organization** @@ -180,11 +175,11 @@ This is how it works: Function|Description|Documentation :---:|:---|:---: **Collect**|Multiple independent data collection workers are collecting metrics from their sources using the optimal protocol for each application and push the metrics to the database.
Each data collection worker has lockless write access to the metrics it collects.|[`collectors`](collectors/#data-collection-plugins) -**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](database/#netdata-database) +**Store**|Metrics are stored in RAM in a round robin database (ring buffer), using a custom made floating point number for minimal footprint.|[`database`](database/#database) **Check**|A lockless independent watchdog is evaluating **health checks** on the collected metrics, triggers alarms, maintains a health transaction log and dispatches alarm notifications.|[`health`](health/#health-monitoring) -**Stream**|An lockless independent worker is streaming metrics, in full detail and in real-time, to remote netdata servers, as soon as they are collected.|[`streaming`](streaming/#metrics-streaming) +**Stream**|A lockless independent worker is streaming metrics, in full detail and in real-time, to remote netdata servers, as soon as they are collected.|[`streaming`](streaming/#streaming-and-replication) **Archive**|A lockless independent worker is down-sampling the metrics and pushes them to **backend** time-series databases.|[`backends`](backends/) -**Query**|Multiple independent workers are attached to the [internal web server](web/server/#netdata-web-server), servicing API requests, including [data queries](web/api/queries/#database-queries).|[`web/api`](web/api/#api) +**Query**|Multiple independent workers are attached to the [internal web server](web/server/#web-server), servicing API requests, including [data queries](web/api/queries/#database-queries).|[`web/api`](web/api/#api) The result is a highly efficient, low latency system, supporting multiple readers and one writer on each metric. @@ -253,7 +248,7 @@ Charts on netdata dashboards are synchronized to each other. There is no master *Charts are panned by dragging them with the mouse. Charts can be zoomed in/out with `SHIFT` + `mouse wheel` while the mouse pointer is over a chart.* -> The visible time-frame (pan and zoom) is propagated from netdata server to netdata server, when navigating via the [`my-netdata` menu](registry#netdata-registry). +> The visible time-frame (pan and zoom) is propagated from netdata server to netdata server, when navigating via the [`my-netdata` menu](registry#registry). ### Highlighted time-frame @@ -264,7 +259,7 @@ To improve visual anomaly detection across charts, the user can highlight a time *A highlighted time-frame can be given by pressing `ALT` + `mouse selection` on any chart. Netdata will highlight the same range on all charts.* -> Highlighted ranges are propagated from netdata server to netdata server, when navigating via the [`my-netdata` menu](registry#netdata-registry). +> Highlighted ranges are propagated from netdata server to netdata server, when navigating via the [`my-netdata` menu](registry#registry). ## What does it monitor @@ -452,20 +447,20 @@ Its [Plugin API](collectors/plugins.d/) supports all programming languages (anyth #### Provisioning Systems - **[Puppet](collectors/python.d.plugin/puppet/)** - connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics. -And you can extend it, by writing plugins that collect data from any source, using any computer language. +You can easily extend Netdata by writing plugins that collect data from any source, using any computer language.
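Since the line above is the README's whole pitch for third-party collectors, here is a minimal sketch of what such a plugin can look like. It speaks the text protocol of [`plugins.d`](collectors/plugins.d/) over stdout; the chart name `example.random` and the dimension name are illustrative, not taken from this patch:

```sh
#!/usr/bin/env bash
# Minimal external netdata collector (a sketch; not shipped by this patch).
# Installed as an executable in netdata's plugins directory, it is started by
# netdata, which passes the update frequency in seconds as the first argument.
update_every="${1:-1}"

# Announce one chart with one dimension, once at startup.
echo "CHART example.random '' 'A random number' 'value' random random line 90000 ${update_every}"
echo "DIMENSION random '' absolute 1 1"

# Then stream one value per iteration; netdata collects it from our stdout.
while true; do
    echo "BEGIN example.random"
    echo "SET random = ${RANDOM}"
    echo "END"
    sleep "${update_every}"
done
```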
--- ## Documentation -The netdata documentation is inside the repo, so by just navigating the repo on github you can find all the documentation. +The netdata documentation is at [https://docs.netdata.cloud](https://docs.netdata.cloud). But it also lives inside the repo, so you can read all of it just by navigating the repo on github. Here is a quick list: Directory|Description :---|:--- -[`installer`](installer/)|Instructions to install netdata on your systems. -[`docker`](docker/)|Instructions to install netdata using docker. +[`installer`](packaging/installer/)|Instructions to install netdata on your systems. +[`docker`](packaging/docker/)|Instructions to install netdata using docker. [`daemon`](daemon/)|Information about the netdata daemon and its configuration. [`collectors`](collectors/)|Information about data collection plugins. [`health`](health/)|How netdata's health monitoring works, how to create your own alarms and how to configure alarm notification methods. @@ -476,11 +471,11 @@ Directory|Description [`web/gui/custom`](web/gui/custom/)|Learn how to create custom netdata dashboards. [`web/gui/confluence`](web/gui/confluence/)|Learn how to create netdata dashboards on Atlassian's Confluence. -But you can also check all the other directories. Most of them have plenty of documentation. +You can also check all the other directories. Most of them have plenty of documentation. ## Community -We welcome contributions. So, feel free to join the team. +We welcome [contributions](CONTRIBUTING.md). So, feel free to join the team. To report bugs, or get help, use [GitHub Issues](https://github.com/netdata/netdata/issues). @@ -492,8 +487,22 @@ You can also find netdata on: - [Repology](https://repology.org/metapackage/netdata/versions) - [StackShare](https://stackshare.io/netdata) -## License +## License netdata is [GPLv3+](LICENSE). Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](REDISTRIBUTED.md). + +## Is it any good? + +Yes. + +*When people first hear about a new product, they frequently ask if it is any good. A Hacker News user [remarked](https://news.ycombinator.com/item?id=3067434):* + +> Note to self: Starting immediately, all raganwald projects will have a “Is it any good?” section in the readme, and the answer shall be “yes.” + +So, we follow the tradition... + +## Is it awesome? + +[These people](https://github.com/netdata/netdata/stargazers) seem to like it. diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md index fbafa1aba..b0fac2e75 100644 --- a/REDISTRIBUTED.md +++ b/REDISTRIBUTED.md @@ -1,18 +1,15 @@ -# Netdata +# Redistributed software -Copyright 2016-2017, Costa Tsaousis. -Copyright 2017-2018, Netdata Inc. -Released under [GPL v3 or later](http://www.gnu.org/licenses/gpl-3.0.en.html). +netdata copyright info: + Copyright 2016-2018, Costa Tsaousis. + Copyright 2018, Netdata Inc. + Released under [GPL v3 or later](LICENSE). -Netdata uses SPDX license tags to identify the license for its files. +netdata uses SPDX license tags to identify the license for its files. Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/). ---- - -## Re-distributed software - -Netdata re-distributes the following third-party software. -We have decided to re-distribute all these, instead of using them +netdata redistributes the following third-party software.
+We have decided to redistribute all these, instead of using them through a CDN, to allow netdata to work in cases where Internet connectivity is not available. @@ -202,3 +199,5 @@ connectivity is not available. Copyright 2014, 2015, 2016 Ori Livneh [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2FREDISTRIBUTED&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/aclocal.m4 b/aclocal.m4 deleted file mode 100644 index d072284de..000000000 --- a/aclocal.m4 +++ /dev/null @@ -1,1355 +0,0 @@ -# generated automatically by aclocal 1.14.1 -*- Autoconf -*- - -# Copyright (C) 1996-2013 Free Software Foundation, Inc. - -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) -m4_ifndef([AC_AUTOCONF_VERSION], - [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, -[m4_warning([this file was generated for autoconf 2.69. -You have another version of autoconf. It may work, but is not guaranteed to. -If you have problems, you may need to regenerate the build system entirely. -To do so, use the procedure documented by the package, typically 'autoreconf'.])]) - -# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- -# serial 1 (pkg-config-0.24) -# -# Copyright © 2004 Scott James Remnant . -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. 
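For orientation while reading the pkg.m4 macros reproduced below: PKG_CHECK_MODULES ultimately just shells out to pkg-config. A call like PKG_CHECK_MODULES([UUID], [uuid]) boils down to roughly the following at configure time (a sketch; the `uuid` module name is purely illustrative):

```sh
# Approximate shell behaviour of PKG_CHECK_MODULES([UUID], [uuid]):
if pkg-config --exists --print-errors "uuid"; then
    UUID_CFLAGS=$(pkg-config --cflags uuid)   # compiler flags, e.g. include paths
    UUID_LIBS=$(pkg-config --libs uuid)       # linker flags, e.g. -luuid
else
    echo "configure: error: Package requirements (uuid) were not met" >&2
    exit 1
fi
```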
- -# PKG_PROG_PKG_CONFIG([MIN-VERSION]) -# ---------------------------------- -AC_DEFUN([PKG_PROG_PKG_CONFIG], -[m4_pattern_forbid([^_?PKG_[A-Z_]+$]) -m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) -m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) -AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) -AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) -AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) - -if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then - AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) -fi -if test -n "$PKG_CONFIG"; then - _pkg_min_version=m4_default([$1], [0.9.0]) - AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) - if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - PKG_CONFIG="" - fi -fi[]dnl -])# PKG_PROG_PKG_CONFIG - -# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) -# -# Check to see whether a particular set of modules exists. Similar -# to PKG_CHECK_MODULES(), but does not set variables or print errors. -# -# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -# only at the first occurence in configure.ac, so if the first place -# it's called might be skipped (such as if it is within an "if", you -# have to call PKG_CHECK_EXISTS manually -# -------------------------------------------------------------- -AC_DEFUN([PKG_CHECK_EXISTS], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl -if test -n "$PKG_CONFIG" && \ - AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then - m4_default([$2], [:]) -m4_ifvaln([$3], [else - $3])dnl -fi]) - -# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) -# --------------------------------------------- -m4_define([_PKG_CONFIG], -[if test -n "$$1"; then - pkg_cv_[]$1="$$1" - elif test -n "$PKG_CONFIG"; then - PKG_CHECK_EXISTS([$3], - [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes ], - [pkg_failed=yes]) - else - pkg_failed=untried -fi[]dnl -])# _PKG_CONFIG - -# _PKG_SHORT_ERRORS_SUPPORTED -# ----------------------------- -AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi[]dnl -])# _PKG_SHORT_ERRORS_SUPPORTED - - -# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], -# [ACTION-IF-NOT-FOUND]) -# -# -# Note that if there is a possibility the first call to -# PKG_CHECK_MODULES might not happen, you should be sure to include an -# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac -# -# -# -------------------------------------------------------------- -AC_DEFUN([PKG_CHECK_MODULES], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl -AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl -AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl - -pkg_failed=no -AC_MSG_CHECKING([for $1]) - -_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) -_PKG_CONFIG([$1][_LIBS], [libs], [$2]) - -m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS -and $1[]_LIBS to avoid the need to call pkg-config. 
-See the pkg-config man page for more details.]) - -if test $pkg_failed = yes; then - AC_MSG_RESULT([no]) - _PKG_SHORT_ERRORS_SUPPORTED - if test $_pkg_short_errors_supported = yes; then - $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` - else - $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD - - m4_default([$4], [AC_MSG_ERROR( -[Package requirements ($2) were not met: - -$$1_PKG_ERRORS - -Consider adjusting the PKG_CONFIG_PATH environment variable if you -installed software in a non-standard prefix. - -_PKG_TEXT])[]dnl - ]) -elif test $pkg_failed = untried; then - AC_MSG_RESULT([no]) - m4_default([$4], [AC_MSG_FAILURE( -[The pkg-config script could not be found or is too old. Make sure it -is in your PATH or set the PKG_CONFIG environment variable to the full -path to pkg-config. - -_PKG_TEXT - -To get pkg-config, see .])[]dnl - ]) -else - $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS - $1[]_LIBS=$pkg_cv_[]$1[]_LIBS - AC_MSG_RESULT([yes]) - $3 -fi[]dnl -])# PKG_CHECK_MODULES - -# Copyright (C) 2002-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_AUTOMAKE_VERSION(VERSION) -# ---------------------------- -# Automake X.Y traces this macro to ensure aclocal.m4 has been -# generated from the m4 files accompanying Automake X.Y. -# (This private macro should not be called outside this file.) -AC_DEFUN([AM_AUTOMAKE_VERSION], -[am__api_version='1.14' -dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to -dnl require some minimum version. Point them to the right macro. -m4_if([$1], [1.14.1], [], - [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl -]) - -# _AM_AUTOCONF_VERSION(VERSION) -# ----------------------------- -# aclocal traces this macro to find the Autoconf version. -# This is a private macro too. Using m4_define simplifies -# the logic in aclocal, which can simply ignore this definition. -m4_define([_AM_AUTOCONF_VERSION], []) - -# AM_SET_CURRENT_AUTOMAKE_VERSION -# ------------------------------- -# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. -# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. -AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.14.1])dnl -m4_ifndef([AC_AUTOCONF_VERSION], - [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) - -# AM_AUX_DIR_EXPAND -*- Autoconf -*- - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets -# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to -# '$srcdir', '$srcdir/..', or '$srcdir/../..'. -# -# Of course, Automake must honor this variable whenever it calls a -# tool from the auxiliary directory. The problem is that $srcdir (and -# therefore $ac_aux_dir as well) can be either absolute or relative, -# depending on how configure is run. 
This is pretty annoying, since -# it makes $ac_aux_dir quite unusable in subdirectories: in the top -# source directory, any form will work fine, but in subdirectories a -# relative path needs to be adjusted first. -# -# $ac_aux_dir/missing -# fails when called from a subdirectory if $ac_aux_dir is relative -# $top_srcdir/$ac_aux_dir/missing -# fails if $ac_aux_dir is absolute, -# fails when called from a subdirectory in a VPATH build with -# a relative $ac_aux_dir -# -# The reason of the latter failure is that $top_srcdir and $ac_aux_dir -# are both prefixed by $srcdir. In an in-source build this is usually -# harmless because $srcdir is '.', but things will broke when you -# start a VPATH build or use an absolute $srcdir. -# -# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, -# iff we strip the leading $srcdir from $ac_aux_dir. That would be: -# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` -# and then we would define $MISSING as -# MISSING="\${SHELL} $am_aux_dir/missing" -# This will work as long as MISSING is not called from configure, because -# unfortunately $(top_srcdir) has no meaning in configure. -# However there are other variables, like CC, which are often used in -# configure, and could therefore not use this "fixed" $ac_aux_dir. -# -# Another solution, used here, is to always expand $ac_aux_dir to an -# absolute PATH. The drawback is that using absolute paths prevent a -# configured tree to be moved without reconfiguration. - -AC_DEFUN([AM_AUX_DIR_EXPAND], -[dnl Rely on autoconf to set up CDPATH properly. -AC_PREREQ([2.50])dnl -# expand $ac_aux_dir to an absolute path -am_aux_dir=`cd $ac_aux_dir && pwd` -]) - -# AM_CONDITIONAL -*- Autoconf -*- - -# Copyright (C) 1997-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_CONDITIONAL(NAME, SHELL-CONDITION) -# ------------------------------------- -# Define a conditional. -AC_DEFUN([AM_CONDITIONAL], -[AC_PREREQ([2.52])dnl - m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], - [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl -AC_SUBST([$1_TRUE])dnl -AC_SUBST([$1_FALSE])dnl -_AM_SUBST_NOTMAKE([$1_TRUE])dnl -_AM_SUBST_NOTMAKE([$1_FALSE])dnl -m4_define([_AM_COND_VALUE_$1], [$2])dnl -if $2; then - $1_TRUE= - $1_FALSE='#' -else - $1_TRUE='#' - $1_FALSE= -fi -AC_CONFIG_COMMANDS_PRE( -[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then - AC_MSG_ERROR([[conditional "$1" was never defined. -Usually this means the macro was only invoked conditionally.]]) -fi])]) - -# Copyright (C) 1999-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - - -# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be -# written in clear, in which case automake, when reading aclocal.m4, -# will think it sees a *use*, and therefore will trigger all it's -# C support machinery. Also note that it means that autoscan, seeing -# CC etc. in the Makefile, will ask for an AC_PROG_CC use... - - -# _AM_DEPENDENCIES(NAME) -# ---------------------- -# See how the compiler implements dependency checking. -# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". -# We try a few techniques and use that to set a single cache variable. 
-# -# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was -# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular -# dependency, and given that the user is not expected to run this macro, -# just rely on AC_PROG_CC. -AC_DEFUN([_AM_DEPENDENCIES], -[AC_REQUIRE([AM_SET_DEPDIR])dnl -AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl -AC_REQUIRE([AM_MAKE_INCLUDE])dnl -AC_REQUIRE([AM_DEP_TRACK])dnl - -m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], - [$1], [CXX], [depcc="$CXX" am_compiler_list=], - [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], - [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], - [$1], [UPC], [depcc="$UPC" am_compiler_list=], - [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], - [depcc="$$1" am_compiler_list=]) - -AC_CACHE_CHECK([dependency style of $depcc], - [am_cv_$1_dependencies_compiler_type], -[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then - # We make a subdir and do the tests there. Otherwise we can end up - # making bogus files that we don't know about and never remove. For - # instance it was reported that on HP-UX the gcc test will end up - # making a dummy file named 'D' -- because '-MD' means "put the output - # in D". - rm -rf conftest.dir - mkdir conftest.dir - # Copy depcomp to subdir because otherwise we won't find it if we're - # using a relative directory. - cp "$am_depcomp" conftest.dir - cd conftest.dir - # We will build objects and dependencies in a subdirectory because - # it helps to detect inapplicable dependency modes. For instance - # both Tru64's cc and ICC support -MD to output dependencies as a - # side effect of compilation, but ICC will put the dependencies in - # the current directory while Tru64 will put them in the object - # directory. - mkdir sub - - am_cv_$1_dependencies_compiler_type=none - if test "$am_compiler_list" = ""; then - am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` - fi - am__universal=false - m4_case([$1], [CC], - [case " $depcc " in #( - *\ -arch\ *\ -arch\ *) am__universal=true ;; - esac], - [CXX], - [case " $depcc " in #( - *\ -arch\ *\ -arch\ *) am__universal=true ;; - esac]) - - for depmode in $am_compiler_list; do - # Setup a source with many dependencies, because some compilers - # like to wrap large dependency lists on column 80 (with \), and - # we should not choose a depcomp mode which is confused by this. - # - # We need to recreate these files for each test, as the compiler may - # overwrite some of them when testing with obscure command lines. - # This happens at least with the AIX C compiler. - : > sub/conftest.c - for i in 1 2 3 4 5 6; do - echo '#include "conftst'$i'.h"' >> sub/conftest.c - # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with - # Solaris 10 /bin/sh. - echo '/* dummy */' > sub/conftst$i.h - done - echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf - - # We check with '-c' and '-o' for the sake of the "dashmstdout" - # mode. It turns out that the SunPro C++ compiler does not properly - # handle '-M -o', and we need to detect this. Also, some Intel - # versions had trouble with output in subdirs. - am__obj=sub/conftest.${OBJEXT-o} - am__minus_obj="-o $am__obj" - case $depmode in - gcc) - # This depmode causes a compiler race in universal mode. - test "$am__universal" = false || continue - ;; - nosideeffect) - # After this tag, mechanisms are not by side-effect, so they'll - # only be used when explicitly requested. 
- if test "x$enable_dependency_tracking" = xyes; then - continue - else - break - fi - ;; - msvc7 | msvc7msys | msvisualcpp | msvcmsys) - # This compiler won't grok '-c -o', but also, the minuso test has - # not run yet. These depmodes are late enough in the game, and - # so weak that their functioning should not be impacted. - am__obj=conftest.${OBJEXT-o} - am__minus_obj= - ;; - none) break ;; - esac - if depmode=$depmode \ - source=sub/conftest.c object=$am__obj \ - depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ - $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ - >/dev/null 2>conftest.err && - grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && - grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && - grep $am__obj sub/conftest.Po > /dev/null 2>&1 && - ${MAKE-make} -s -f confmf > /dev/null 2>&1; then - # icc doesn't choke on unknown options, it will just issue warnings - # or remarks (even with -Werror). So we grep stderr for any message - # that says an option was ignored or not supported. - # When given -MP, icc 7.0 and 7.1 complain thusly: - # icc: Command line warning: ignoring option '-M'; no argument required - # The diagnosis changed in icc 8.0: - # icc: Command line remark: option '-MP' not supported - if (grep 'ignoring option' conftest.err || - grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else - am_cv_$1_dependencies_compiler_type=$depmode - break - fi - fi - done - - cd .. - rm -rf conftest.dir -else - am_cv_$1_dependencies_compiler_type=none -fi -]) -AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) -AM_CONDITIONAL([am__fastdep$1], [ - test "x$enable_dependency_tracking" != xno \ - && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) -]) - - -# AM_SET_DEPDIR -# ------------- -# Choose a directory name for dependency files. -# This macro is AC_REQUIREd in _AM_DEPENDENCIES. -AC_DEFUN([AM_SET_DEPDIR], -[AC_REQUIRE([AM_SET_LEADING_DOT])dnl -AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl -]) - - -# AM_DEP_TRACK -# ------------ -AC_DEFUN([AM_DEP_TRACK], -[AC_ARG_ENABLE([dependency-tracking], [dnl -AS_HELP_STRING( - [--enable-dependency-tracking], - [do not reject slow dependency extractors]) -AS_HELP_STRING( - [--disable-dependency-tracking], - [speeds up one-time build])]) -if test "x$enable_dependency_tracking" != xno; then - am_depcomp="$ac_aux_dir/depcomp" - AMDEPBACKSLASH='\' - am__nodep='_no' -fi -AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) -AC_SUBST([AMDEPBACKSLASH])dnl -_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl -AC_SUBST([am__nodep])dnl -_AM_SUBST_NOTMAKE([am__nodep])dnl -]) - -# Generate code to set up dependency tracking. -*- Autoconf -*- - -# Copyright (C) 1999-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - - -# _AM_OUTPUT_DEPENDENCY_COMMANDS -# ------------------------------ -AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], -[{ - # Older Autoconf quotes --file arguments for eval, but not when files - # are listed without --file. Let's play safe and only enable the eval - # if we detect the quoting. - case $CONFIG_FILES in - *\'*) eval set x "$CONFIG_FILES" ;; - *) set x $CONFIG_FILES ;; - esac - shift - for mf - do - # Strip MF so we end up with the name of the file. - mf=`echo "$mf" | sed -e 's/:.*$//'` - # Check whether this is an Automake generated Makefile or not. 
- # We used to match only the files named 'Makefile.in', but - # some people rename them; so instead we look at the file content. - # Grep'ing the first line is not enough: some people post-process - # each Makefile.in and add a new line on top of each file to say so. - # Grep'ing the whole file is not good either: AIX grep has a line - # limit of 2048, but all sed's we know have understand at least 4000. - if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then - dirpart=`AS_DIRNAME("$mf")` - else - continue - fi - # Extract the definition of DEPDIR, am__include, and am__quote - # from the Makefile without running 'make'. - DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` - test -z "$DEPDIR" && continue - am__include=`sed -n 's/^am__include = //p' < "$mf"` - test -z "$am__include" && continue - am__quote=`sed -n 's/^am__quote = //p' < "$mf"` - # Find all dependency output files, they are included files with - # $(DEPDIR) in their names. We invoke sed twice because it is the - # simplest approach to changing $(DEPDIR) to its actual value in the - # expansion. - for file in `sed -n " - s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ - sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do - # Make sure the directory exists. - test -f "$dirpart/$file" && continue - fdir=`AS_DIRNAME(["$file"])` - AS_MKDIR_P([$dirpart/$fdir]) - # echo "creating $dirpart/$file" - echo '# dummy' > "$dirpart/$file" - done - done -} -])# _AM_OUTPUT_DEPENDENCY_COMMANDS - - -# AM_OUTPUT_DEPENDENCY_COMMANDS -# ----------------------------- -# This macro should only be invoked once -- use via AC_REQUIRE. -# -# This code is only required when automatic dependency tracking -# is enabled. FIXME. This creates each '.P' file that we will -# need in order to bootstrap the dependency handling code. -AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], -[AC_CONFIG_COMMANDS([depfiles], - [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], - [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) -]) - -# Do all the work for Automake. -*- Autoconf -*- - -# Copyright (C) 1996-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This macro actually does too much. Some checks are only needed if -# your package does certain things. But this isn't really a big deal. - -dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. -m4_define([AC_PROG_CC], -m4_defn([AC_PROG_CC]) -[_AM_PROG_CC_C_O -]) - -# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) -# AM_INIT_AUTOMAKE([OPTIONS]) -# ----------------------------------------------- -# The call with PACKAGE and VERSION arguments is the old style -# call (pre autoconf-2.50), which is being phased out. PACKAGE -# and VERSION should now be passed to AC_INIT and removed from -# the call to AM_INIT_AUTOMAKE. -# We support both call styles for the transition. After -# the next Automake release, Autoconf can make the AC_INIT -# arguments mandatory, and then we can depend on a new Autoconf -# release and drop the old call support. -AC_DEFUN([AM_INIT_AUTOMAKE], -[AC_PREREQ([2.65])dnl -dnl Autoconf wants to disallow AM_ names. We explicitly allow -dnl the ones we care about. 
-m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl -AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl -AC_REQUIRE([AC_PROG_INSTALL])dnl -if test "`cd $srcdir && pwd`" != "`pwd`"; then - # Use -I$(srcdir) only when $(srcdir) != ., so that make's output - # is not polluted with repeated "-I." - AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl - # test to see if srcdir already configured - if test -f $srcdir/config.status; then - AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) - fi -fi - -# test whether we have cygpath -if test -z "$CYGPATH_W"; then - if (cygpath --version) >/dev/null 2>/dev/null; then - CYGPATH_W='cygpath -w' - else - CYGPATH_W=echo - fi -fi -AC_SUBST([CYGPATH_W]) - -# Define the identity of the package. -dnl Distinguish between old-style and new-style calls. -m4_ifval([$2], -[AC_DIAGNOSE([obsolete], - [$0: two- and three-arguments forms are deprecated.]) -m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl - AC_SUBST([PACKAGE], [$1])dnl - AC_SUBST([VERSION], [$2])], -[_AM_SET_OPTIONS([$1])dnl -dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. -m4_if( - m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), - [ok:ok],, - [m4_fatal([AC_INIT should be called with package and version arguments])])dnl - AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl - AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl - -_AM_IF_OPTION([no-define],, -[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) - AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl - -# Some tools Automake needs. -AC_REQUIRE([AM_SANITY_CHECK])dnl -AC_REQUIRE([AC_ARG_PROGRAM])dnl -AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) -AM_MISSING_PROG([AUTOCONF], [autoconf]) -AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) -AM_MISSING_PROG([AUTOHEADER], [autoheader]) -AM_MISSING_PROG([MAKEINFO], [makeinfo]) -AC_REQUIRE([AM_PROG_INSTALL_SH])dnl -AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl -AC_REQUIRE([AC_PROG_MKDIR_P])dnl -# For better backward compatibility. To be removed once Automake 1.9.x -# dies out for good. For more background, see: -# -# -AC_SUBST([mkdir_p], ['$(MKDIR_P)']) -# We need awk for the "check" target. The system "awk" is bad on -# some platforms. -AC_REQUIRE([AC_PROG_AWK])dnl -AC_REQUIRE([AC_PROG_MAKE_SET])dnl -AC_REQUIRE([AM_SET_LEADING_DOT])dnl -_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], - [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], - [_AM_PROG_TAR([v7])])]) -_AM_IF_OPTION([no-dependencies],, -[AC_PROVIDE_IFELSE([AC_PROG_CC], - [_AM_DEPENDENCIES([CC])], - [m4_define([AC_PROG_CC], - m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl -AC_PROVIDE_IFELSE([AC_PROG_CXX], - [_AM_DEPENDENCIES([CXX])], - [m4_define([AC_PROG_CXX], - m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl -AC_PROVIDE_IFELSE([AC_PROG_OBJC], - [_AM_DEPENDENCIES([OBJC])], - [m4_define([AC_PROG_OBJC], - m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl -AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], - [_AM_DEPENDENCIES([OBJCXX])], - [m4_define([AC_PROG_OBJCXX], - m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl -]) -AC_REQUIRE([AM_SILENT_RULES])dnl -dnl The testsuite driver may need to know about EXEEXT, so add the -dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This -dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. 
-AC_CONFIG_COMMANDS_PRE(dnl -[m4_provide_if([_AM_COMPILER_EXEEXT], - [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl - -# POSIX will say in a future version that running "rm -f" with no argument -# is OK; and we want to be able to make that assumption in our Makefile -# recipes. So use an aggressive probe to check that the usage we want is -# actually supported "in the wild" to an acceptable degree. -# See automake bug#10828. -# To make any issue more visible, cause the running configure to be aborted -# by default if the 'rm' program in use doesn't match our expectations; the -# user can still override this though. -if rm -f && rm -fr && rm -rf; then : OK; else - cat >&2 <<'END' -Oops! - -Your 'rm' program seems unable to run without file operands specified -on the command line, even when the '-f' option is present. This is contrary -to the behaviour of most rm programs out there, and not conforming with -the upcoming POSIX standard: - -Please tell bug-automake@gnu.org about your system, including the value -of your $PATH and any error possibly output before this message. This -can help us improve future automake versions. - -END - if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then - echo 'Configuration will proceed anyway, since you have set the' >&2 - echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 - echo >&2 - else - cat >&2 <<'END' -Aborting the configuration process, to ensure you take notice of the issue. - -You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: . - -If you want to complete the configuration process using your problematic -'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM -to "yes", and re-run configure. - -END - AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) - fi -fi]) - -dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not -dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further -dnl mangled by Autoconf and run in a shell conditional statement. -m4_define([_AC_COMPILER_EXEEXT], -m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) - -# When config.status generates a header, we must update the stamp-h file. -# This file resides in the same directory as the config header -# that is generated. The stamp files are numbered to have different names. - -# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the -# loop where config.status creates the headers, so we can generate -# our stamp files there. -AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], -[# Compute $1's index in $config_headers. -_am_arg=$1 -_am_stamp_count=1 -for _am_header in $config_headers :; do - case $_am_header in - $_am_arg | $_am_arg:* ) - break ;; - * ) - _am_stamp_count=`expr $_am_stamp_count + 1` ;; - esac -done -echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_PROG_INSTALL_SH -# ------------------ -# Define $install_sh. -AC_DEFUN([AM_PROG_INSTALL_SH], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -if test x"${install_sh}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; - *) - install_sh="\${SHELL} $am_aux_dir/install-sh" - esac -fi -AC_SUBST([install_sh])]) - -# Copyright (C) 2003-2013 Free Software Foundation, Inc. 
-# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# Check whether the underlying file-system supports filenames -# with a leading dot. For instance MS-DOS doesn't. -AC_DEFUN([AM_SET_LEADING_DOT], -[rm -rf .tst 2>/dev/null -mkdir .tst 2>/dev/null -if test -d .tst; then - am__leading_dot=. -else - am__leading_dot=_ -fi -rmdir .tst 2>/dev/null -AC_SUBST([am__leading_dot])]) - -# Add --enable-maintainer-mode option to configure. -*- Autoconf -*- -# From Jim Meyering - -# Copyright (C) 1996-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_MAINTAINER_MODE([DEFAULT-MODE]) -# ---------------------------------- -# Control maintainer-specific portions of Makefiles. -# Default is to disable them, unless 'enable' is passed literally. -# For symmetry, 'disable' may be passed as well. Anyway, the user -# can override the default with the --enable/--disable switch. -AC_DEFUN([AM_MAINTAINER_MODE], -[m4_case(m4_default([$1], [disable]), - [enable], [m4_define([am_maintainer_other], [disable])], - [disable], [m4_define([am_maintainer_other], [enable])], - [m4_define([am_maintainer_other], [enable]) - m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) -AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) - dnl maintainer-mode's default is 'disable' unless 'enable' is passed - AC_ARG_ENABLE([maintainer-mode], - [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], - am_maintainer_other[ make rules and dependencies not useful - (and sometimes confusing) to the casual installer])], - [USE_MAINTAINER_MODE=$enableval], - [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) - AC_MSG_RESULT([$USE_MAINTAINER_MODE]) - AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) - MAINT=$MAINTAINER_MODE_TRUE - AC_SUBST([MAINT])dnl -] -) - -# Check to see how 'make' treats includes. -*- Autoconf -*- - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_MAKE_INCLUDE() -# ----------------- -# Check to see how make treats includes. -AC_DEFUN([AM_MAKE_INCLUDE], -[am_make=${MAKE-make} -cat > confinc << 'END' -am__doit: - @echo this is the am__doit target -.PHONY: am__doit -END -# If we don't find an include directive, just comment out the code. -AC_MSG_CHECKING([for style of include used by $am_make]) -am__include="#" -am__quote= -_am_result=none -# First try GNU make style include. -echo "include confinc" > confmf -# Ignore all kinds of additional output from 'make'. -case `$am_make -s -f confmf 2> /dev/null` in #( -*the\ am__doit\ target*) - am__include=include - am__quote= - _am_result=GNU - ;; -esac -# Now try BSD make style include. 
-if test "$am__include" = "#"; then - echo '.include "confinc"' > confmf - case `$am_make -s -f confmf 2> /dev/null` in #( - *the\ am__doit\ target*) - am__include=.include - am__quote="\"" - _am_result=BSD - ;; - esac -fi -AC_SUBST([am__include]) -AC_SUBST([am__quote]) -AC_MSG_RESULT([$_am_result]) -rm -f confinc confmf -]) - -# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- - -# Copyright (C) 1997-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_MISSING_PROG(NAME, PROGRAM) -# ------------------------------ -AC_DEFUN([AM_MISSING_PROG], -[AC_REQUIRE([AM_MISSING_HAS_RUN]) -$1=${$1-"${am_missing_run}$2"} -AC_SUBST($1)]) - -# AM_MISSING_HAS_RUN -# ------------------ -# Define MISSING if not defined so far and test if it is modern enough. -# If it is, set am_missing_run to use it, otherwise, to nothing. -AC_DEFUN([AM_MISSING_HAS_RUN], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -AC_REQUIRE_AUX_FILE([missing])dnl -if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac -fi -# Use eval to expand $SHELL -if eval "$MISSING --is-lightweight"; then - am_missing_run="$MISSING " -else - am_missing_run= - AC_MSG_WARN(['missing' script is too old or missing]) -fi -]) - -# Helper functions for option handling. -*- Autoconf -*- - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# _AM_MANGLE_OPTION(NAME) -# ----------------------- -AC_DEFUN([_AM_MANGLE_OPTION], -[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) - -# _AM_SET_OPTION(NAME) -# -------------------- -# Set option NAME. Presently that only means defining a flag for this option. -AC_DEFUN([_AM_SET_OPTION], -[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) - -# _AM_SET_OPTIONS(OPTIONS) -# ------------------------ -# OPTIONS is a space-separated list of Automake options. -AC_DEFUN([_AM_SET_OPTIONS], -[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) - -# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) -# ------------------------------------------- -# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. -AC_DEFUN([_AM_IF_OPTION], -[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) - -# Copyright (C) 1999-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# _AM_PROG_CC_C_O -# --------------- -# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC -# to automatically call this. -AC_DEFUN([_AM_PROG_CC_C_O], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -AC_REQUIRE_AUX_FILE([compile])dnl -AC_LANG_PUSH([C])dnl -AC_CACHE_CHECK( - [whether $CC understands -c and -o together], - [am_cv_prog_cc_c_o], - [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) - # Make sure it works both with $CC and with simple cc. - # Following AC_PROG_CC_C_O, we do the test twice because some - # compilers refuse to overwrite an existing .o file with -o, - # though they will create one. 
- am_cv_prog_cc_c_o=yes - for am_i in 1 2; do - if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ - && test -f conftest2.$ac_objext; then - : OK - else - am_cv_prog_cc_c_o=no - break - fi - done - rm -f core conftest* - unset am_i]) -if test "$am_cv_prog_cc_c_o" != yes; then - # Losing compiler, so override with the script. - # FIXME: It is wrong to rewrite CC. - # But if we don't then we get into trouble of one sort or another. - # A longer-term fix would be to have automake use am__CC in this case, - # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" - CC="$am_aux_dir/compile $CC" -fi -AC_LANG_POP([C])]) - -# For backward compatibility. -AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_RUN_LOG(COMMAND) -# ------------------- -# Run COMMAND, save the exit status in ac_status, and log it. -# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) -AC_DEFUN([AM_RUN_LOG], -[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD - ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - (exit $ac_status); }]) - -# Check to make sure that the build environment is sane. -*- Autoconf -*- - -# Copyright (C) 1996-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_SANITY_CHECK -# --------------- -AC_DEFUN([AM_SANITY_CHECK], -[AC_MSG_CHECKING([whether build environment is sane]) -# Reject unsafe characters in $srcdir or the absolute working directory -# name. Accept space and tab only in the latter. -am_lf=' -' -case `pwd` in - *[[\\\"\#\$\&\'\`$am_lf]]*) - AC_MSG_ERROR([unsafe absolute working directory name]);; -esac -case $srcdir in - *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) - AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; -esac - -# Do 'set' in a subshell so we don't clobber the current shell's -# arguments. Must try -L first in case configure is actually a -# symlink; some systems play weird games with the mod time of symlinks -# (eg FreeBSD returns the mod time of the symlink's containing -# directory). -if ( - am_has_slept=no - for am_try in 1 2; do - echo "timestamp, slept: $am_has_slept" > conftest.file - set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` - if test "$[*]" = "X"; then - # -L didn't work. - set X `ls -t "$srcdir/configure" conftest.file` - fi - if test "$[*]" != "X $srcdir/configure conftest.file" \ - && test "$[*]" != "X conftest.file $srcdir/configure"; then - - # If neither matched, then we have a broken ls. This can happen - # if, for instance, CONFIG_SHELL is bash and it inherits a - # broken ls alias from the environment. This has actually - # happened. Such a system could not be considered "sane". - AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken - alias in your environment]) - fi - if test "$[2]" = conftest.file || test $am_try -eq 2; then - break - fi - # Just in case. - sleep 1 - am_has_slept=yes - done - test "$[2]" = conftest.file - ) -then - # Ok. - : -else - AC_MSG_ERROR([newly created file is older than distributed files! 
-Check your system clock]) -fi -AC_MSG_RESULT([yes]) -# If we didn't sleep, we still need to ensure time stamps of config.status and -# generated files are strictly newer. -am_sleep_pid= -if grep 'slept: no' conftest.file >/dev/null 2>&1; then - ( sleep 1 ) & - am_sleep_pid=$! -fi -AC_CONFIG_COMMANDS_PRE( - [AC_MSG_CHECKING([that generated files are newer than configure]) - if test -n "$am_sleep_pid"; then - # Hide warnings about reused PIDs. - wait $am_sleep_pid 2>/dev/null - fi - AC_MSG_RESULT([done])]) -rm -f conftest.file -]) - -# Copyright (C) 2009-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_SILENT_RULES([DEFAULT]) -# -------------------------- -# Enable less verbose build rules; with the default set to DEFAULT -# ("yes" being less verbose, "no" or empty being verbose). -AC_DEFUN([AM_SILENT_RULES], -[AC_ARG_ENABLE([silent-rules], [dnl -AS_HELP_STRING( - [--enable-silent-rules], - [less verbose build output (undo: "make V=1")]) -AS_HELP_STRING( - [--disable-silent-rules], - [verbose build output (undo: "make V=0")])dnl -]) -case $enable_silent_rules in @%:@ ((( - yes) AM_DEFAULT_VERBOSITY=0;; - no) AM_DEFAULT_VERBOSITY=1;; - *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; -esac -dnl -dnl A few 'make' implementations (e.g., NonStop OS and NextStep) -dnl do not support nested variable expansions. -dnl See automake bug#9928 and bug#10237. -am_make=${MAKE-make} -AC_CACHE_CHECK([whether $am_make supports nested variables], - [am_cv_make_support_nested_variables], - [if AS_ECHO([['TRUE=$(BAR$(V)) -BAR0=false -BAR1=true -V=1 -am__doit: - @$(TRUE) -.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then - am_cv_make_support_nested_variables=yes -else - am_cv_make_support_nested_variables=no -fi]) -if test $am_cv_make_support_nested_variables = yes; then - dnl Using '$V' instead of '$(V)' breaks IRIX make. - AM_V='$(V)' - AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' -else - AM_V=$AM_DEFAULT_VERBOSITY - AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY -fi -AC_SUBST([AM_V])dnl -AM_SUBST_NOTMAKE([AM_V])dnl -AC_SUBST([AM_DEFAULT_V])dnl -AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl -AC_SUBST([AM_DEFAULT_VERBOSITY])dnl -AM_BACKSLASH='\' -AC_SUBST([AM_BACKSLASH])dnl -_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl -]) - -# Copyright (C) 2001-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# AM_PROG_INSTALL_STRIP -# --------------------- -# One issue with vendor 'install' (even GNU) is that you can't -# specify the program used to strip binaries. This is especially -# annoying in cross-compiling environments, where the build's strip -# is unlikely to handle the host's binaries. -# Fortunately install-sh will honor a STRIPPROG variable, so we -# always use install-sh in "make install-strip", and initialize -# STRIPPROG with the value of the STRIP variable (set by the user). -AC_DEFUN([AM_PROG_INSTALL_STRIP], -[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl -# Installed binaries are usually stripped using 'strip' when the user -# run "make install-strip". However 'strip' might not be the right -# tool to use in cross-compilation environments, therefore Automake -# will honor the 'STRIP' environment variable to overrule this program. 
-dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. -if test "$cross_compiling" != no; then - AC_CHECK_TOOL([STRIP], [strip], :) -fi -INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" -AC_SUBST([INSTALL_STRIP_PROGRAM])]) - -# Copyright (C) 2006-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# _AM_SUBST_NOTMAKE(VARIABLE) -# --------------------------- -# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. -# This macro is traced by Automake. -AC_DEFUN([_AM_SUBST_NOTMAKE]) - -# AM_SUBST_NOTMAKE(VARIABLE) -# -------------------------- -# Public sister of _AM_SUBST_NOTMAKE. -AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) - -# Check how to create a tarball. -*- Autoconf -*- - -# Copyright (C) 2004-2013 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# _AM_PROG_TAR(FORMAT) -# -------------------- -# Check how to create a tarball in format FORMAT. -# FORMAT should be one of 'v7', 'ustar', or 'pax'. -# -# Substitute a variable $(am__tar) that is a command -# writing to stdout a FORMAT-tarball containing the directory -# $tardir. -# tardir=directory && $(am__tar) > result.tar -# -# Substitute a variable $(am__untar) that extract such -# a tarball read from stdin. -# $(am__untar) < result.tar -# -AC_DEFUN([_AM_PROG_TAR], -[# Always define AMTAR for backward compatibility. Yes, it's still used -# in the wild :-( We should find a proper way to deprecate it ... -AC_SUBST([AMTAR], ['$${TAR-tar}']) - -# We'll loop over all known methods to create a tar archive until one works. -_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' - -m4_if([$1], [v7], - [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], - - [m4_case([$1], - [ustar], - [# The POSIX 1988 'ustar' format is defined with fixed-size fields. - # There is notably a 21 bits limit for the UID and the GID. In fact, - # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 - # and bug#13588). - am_max_uid=2097151 # 2^21 - 1 - am_max_gid=$am_max_uid - # The $UID and $GID variables are not portable, so we need to resort - # to the POSIX-mandated id(1) utility. Errors in the 'id' calls - # below are definitely unexpected, so allow the users to see them - # (that is, avoid stderr redirection). - am_uid=`id -u || echo unknown` - am_gid=`id -g || echo unknown` - AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) - if test $am_uid -le $am_max_uid; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - _am_tools=none - fi - AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) - if test $am_gid -le $am_max_gid; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - _am_tools=none - fi], - - [pax], - [], - - [m4_fatal([Unknown tar format])]) - - AC_MSG_CHECKING([how to create a $1 tar archive]) - - # Go ahead even if we have the value already cached. We do so because we - # need to set the values for the 'am__tar' and 'am__untar' variables. 
- _am_tools=${am_cv_prog_tar_$1-$_am_tools} - - for _am_tool in $_am_tools; do - case $_am_tool in - gnutar) - for _am_tar in tar gnutar gtar; do - AM_RUN_LOG([$_am_tar --version]) && break - done - am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' - am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' - am__untar="$_am_tar -xf -" - ;; - plaintar) - # Must skip GNU tar: if it does not support --format= it doesn't create - # ustar tarball either. - (tar --version) >/dev/null 2>&1 && continue - am__tar='tar chf - "$$tardir"' - am__tar_='tar chf - "$tardir"' - am__untar='tar xf -' - ;; - pax) - am__tar='pax -L -x $1 -w "$$tardir"' - am__tar_='pax -L -x $1 -w "$tardir"' - am__untar='pax -r' - ;; - cpio) - am__tar='find "$$tardir" -print | cpio -o -H $1 -L' - am__tar_='find "$tardir" -print | cpio -o -H $1 -L' - am__untar='cpio -i -H $1 -d' - ;; - none) - am__tar=false - am__tar_=false - am__untar=false - ;; - esac - - # If the value was cached, stop now. We just wanted to have am__tar - # and am__untar set. - test -n "${am_cv_prog_tar_$1}" && break - - # tar/untar a dummy directory, and stop if the command works. - rm -rf conftest.dir - mkdir conftest.dir - echo GrepMe > conftest.dir/file - AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) - rm -rf conftest.dir - if test -s conftest.tar; then - AM_RUN_LOG([$am__untar <conftest.tar]) - AM_RUN_LOG([cat conftest.dir/file]) - grep GrepMe conftest.dir/file >/dev/null 2>&1 && break - fi - done - rm -rf conftest.dir - - AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) - AC_MSG_RESULT([$am_cv_prog_tar_$1])]) - -AC_SUBST([am__tar]) -AC_SUBST([am__untar]) -]) # _AM_PROG_TAR - -m4_include([build/m4/ax_c___atomic.m4]) -m4_include([build/m4/ax_c__generic.m4]) -m4_include([build/m4/ax_c_lto.m4]) -m4_include([build/m4/ax_c_mallinfo.m4]) -m4_include([build/m4/ax_c_mallopt.m4]) -m4_include([build/m4/ax_check_compile_flag.m4]) -m4_include([build/m4/ax_gcc_func_attribute.m4]) -m4_include([build/m4/ax_pthread.m4]) -m4_include([build/m4/jemalloc.m4]) -m4_include([build/m4/tcmalloc.m4]) diff --git a/backends/Makefile.in b/backends/Makefile.in deleted file mode 100644 index 026377845..000000000 --- a/backends/Makefile.in +++ /dev/null @@ -1,658 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?)
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = backends -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ 
- *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ 
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - graphite \ - json \ - opentsdb \ - prometheus \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - WALKTHROUGH.md \ - $(NULL) - -dist_noinst_SCRIPTS = \ - nc-backend.sh \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu backends/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/backends/README.md b/backends/README.md index cc943d4d7..22dc77597 100644 --- a/backends/README.md +++ b/backends/README.md @@ -1,4 +1,4 @@ -# Metrics Long Term Archiving +# Metrics long term archiving netdata supports backends for archiving the metrics, or providing long term dashboards, using Grafana or other tools, like this: @@ -36,22 +36,25 @@ X seconds (though, it can send them per second if you need it to). 3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics. -4. Three modes of operation (for all backends): +4. Netdata supports three modes of operation for all backends: - - `as collected`: the latest collected value is sent to the backend. This means that if netdata - is configured to send data to the backend every 10 seconds, only 1 out of 10 values will appear - at the backend server. The values are sent exactly as collected, before any multipliers or - dividers applied and before any interpolation. This mode emulates other data collectors, - such as `collectd` or `telegraf`. + - `as-collected` sends to backends the metrics as they are collected, in the units they are collected. + So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. + For example, to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage. - - `average`: the average of the interpolated values shown on the netdata graphs is sent to the - backend. So, if netdata is configured to send data to the backend server every 10 seconds, - the average of the 10 values shown on the netdata charts will be used. 
**If you can't decide - which mode to use, use `average`.** + - `average` sends to backends normalized metrics from the netdata database. + In this mode, all metrics are sent as gauges, in the units netdata uses. This abstracts data collection + and simplifies visualization, but you will not be able to copy and paste queries from other sources to convert units. + For example, CPU utilization percentage is calculated by netdata, so netdata will convert ticks to percentage and + send the average percentage to the backend. - - `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to - the backend. So, if netdata is configured to send data to the backend every 10 seconds, the - sum of the 10 values shown on the netdata charts will be used. + - `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to the backend. + So, if netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the + netdata charts will be used. + +Time-series databases generally recommend collecting the raw values (`as-collected`). If you plan to invest in building your monitoring around a time-series database and you already know (or are willing to learn) how to convert units and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`. + +If, on the other hand, you just need long term archiving of netdata metrics and you plan to work mainly with netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot simpler. Furthermore, if you use `average`, the charts shown in the backend will match exactly what you see in Netdata, which is not necessarily true for the other modes of operation. 5. This code is smart enough, not to slow down netdata, independently of the speed of the backend server. @@ -196,3 +199,5 @@ netdata adds 4 alarms: ![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/backends/WALKTHROUGH.md b/backends/WALKTHROUGH.md index b899556ab..0c330ee1a 100644 --- a/backends/WALKTHROUGH.md +++ b/backends/WALKTHROUGH.md @@ -67,7 +67,7 @@ chooses the base container images (centos:latest). After running this you should be sitting inside the shell of the container. After we have entered the shell we can install Netdata. This process could not -be easier. If you take a look at [this link](../installer/#installation), the Netdata devs give us +be easier. If you take a look at [this link](../packaging/installer/#installation), the Netdata devs give us several one-liners to install netdata. I have not had any issues with these one liners and their bootstrapping scripts so far (If you guys run into anything do share). Run the following command in your container. @@ -290,3 +290,5 @@ automatically begins to scrape them. Once achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale. Once this happens there are options presented in the Prometheus documentation for solving this. Hope this was helpful, happy monitoring.
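To make the three `data source` modes from the backends README hunk above concrete, here is a minimal sketch of the `[backend]` section of `netdata.conf`. The `data source` option is what selects the mode; the `type` and `destination` values below are illustrative assumptions (a Graphite listener on its default port), not part of this patch, so check them against the generated netdata.conf of your installed version:

    [backend]
        enabled = yes
        type = graphite
        destination = localhost:2003
        # mode: one of "as collected", "average", "sum"
        # (netdata.conf spells the first mode with a space)
        data source = average
        update every = 10

With this configuration, every 10 seconds netdata would send the average of the values shown on its charts for that window; switching to `as collected` or `sum` changes only the `data source` line, not the rest of the section.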
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/backends/backends.c b/backends/backends.c index 53a9a2395..da818c50b 100644 --- a/backends/backends.c +++ b/backends/backends.c @@ -352,7 +352,7 @@ void *backends_main(void *ptr) { rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA); + RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KiB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA); rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); diff --git a/backends/graphite/Makefile.in b/backends/graphite/Makefile.in deleted file mode 100644 index c214a0e75..000000000 --- a/backends/graphite/Makefile.in +++ /dev/null @@ -1,457 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = backends/graphite -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ 
-ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ 
-registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/graphite/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu backends/graphite/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/backends/json/Makefile.in b/backends/json/Makefile.in deleted file mode 100644 index 88fd11a0c..000000000 --- a/backends/json/Makefile.in +++ /dev/null @@ -1,457 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = backends/json -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac 
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ 
-infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/json/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu backends/json/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/backends/opentsdb/Makefile.in b/backends/opentsdb/Makefile.in deleted file mode 100644 index 6c29cce40..000000000 --- a/backends/opentsdb/Makefile.in +++ /dev/null @@ -1,457 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = backends/opentsdb -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac 
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ 
-infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/opentsdb/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu backends/opentsdb/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/backends/prometheus/Makefile.in b/backends/prometheus/Makefile.in deleted file mode 100644 index 76813e758..000000000 --- a/backends/prometheus/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = backends/prometheus -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) 
>/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ 
-htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu backends/prometheus/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu backends/prometheus/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md index 99a11f942..8cf83cbf4 100644 --- a/backends/prometheus/README.md +++ b/backends/prometheus/README.md @@ -9,7 +9,7 @@ Prometheus is a distributed monitoring system which offers a very simple setup a ### Installing netdata -There are number of ways to install netdata according to [Installation](../../installer/#installation) +There are number of ways to install netdata according to [Installation](../../packaging/installer/#installation) The suggested way of installing the latest netdata and keep it upgrade automatically. Using one line installation: ``` @@ -378,3 +378,5 @@ It can also be changed from the URL, by appending `&prefix=netdata`. When the data source is set to `average` or `sum`, netdata remembers the last access of each client accessing prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since that. This means that prometheus servers are not losing data when they access netdata with data source = `average` or `sum`. To uniquely identify each prometheus server, netdata uses the IP of the client accessing the metrics. If however the IP is not good enough for identifying a single prometheus server (e.g. when prometheus servers are accessing netdata through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus may append `&server=NAME` to the URL. This `NAME` is used by netdata to uniquely identify each prometheus server and keep track of its last access time. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c index 223b3f9f0..6b0d7ca10 100644 --- a/backends/prometheus/backend_prometheus.c +++ b/backends/prometheus/backend_prometheus.c @@ -298,6 +298,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER if (as_collected) { // we need as-collected / raw data + if(unlikely(rd->last_collected_time.tv_sec < after)) + continue; + const char *t = "gauge", *h = "gives"; if(rd->algorithm == RRD_ALGORITHM_INCREMENTAL || rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) { @@ -512,12 +515,9 @@ static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_O } if(output_options & PROMETHEUS_OUTPUT_HELP) { - int show_range = 1; char *mode; - if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) { + if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) mode = "as collected"; - show_range = 0; - } else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE) mode = "average"; else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM) @@ -525,19 +525,15 @@ static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_O else mode = "unknown"; - buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s" + buffer_sprintf(wb, "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s, time range %lu to %lu\n\n" , host->hostname , (first_seen)?"FIRST SEEN ":"" , server , mode , (unsigned long)((first_seen)?0:(now - after)) , 
(first_seen)?"never":"seconds ago" + , (unsigned long)after, (unsigned long)now ); - - if(show_range) - buffer_sprintf(wb, ", time range %lu to %lu", (unsigned long)after, (unsigned long)now); - - buffer_strcat(wb, "\n\n"); } return after; diff --git a/build/Dockerfile b/build/Dockerfile deleted file mode 100644 index 8a816b825..000000000 --- a/build/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM gcc:8 - -RUN apt-get update && apt-get install -y \ - autoconf-archive \ - autogen \ - libmnl-dev \ - uuid-dev \ - && rm -rf /var/lib/apt/lists/* diff --git a/build/build.sh b/build/build.sh index ee087b98f..892a7da98 100755 --- a/build/build.sh +++ b/build/build.sh @@ -1,7 +1,8 @@ #!/bin/bash -if [ -f build.sh ]; then - cd ../ || exit 1 +if [ ! -f .gitignore ]; then + echo "Run as ./travis/$(basename "$0") from top level directory of git repository" + exit 1 fi if [ "$IS_CONTAINER" != "" ]; then @@ -10,13 +11,10 @@ if [ "$IS_CONTAINER" != "" ]; then make dist rm -rf autom4te.cache else - if [[ "$(docker images -q netdata-gcc-builder:latest 2> /dev/null)" == "" ]]; then - docker build -t netdata-gcc-builder:latest -f build/Dockerfile . - fi docker run --rm -it \ --env IS_CONTAINER=TRUE \ --volume "${PWD}:/project:Z" \ --workdir "/project" \ - netdata:gcc \ - ./.travis/build.sh + netdata/builder:gcc \ + ./build/build.sh fi diff --git a/build/subst.inc b/build/subst.inc index 8f9ac0551..558d33adf 100644 --- a/build/subst.inc +++ b/build/subst.inc @@ -2,11 +2,11 @@ if sed \ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ -e 's#[@]configdir_POST@#$(configdir)#g' \ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ -e 's#[@]cachedir_POST@#$(cachedir)#g' \ + -e 's#[@]registrydir_POST@#$(registrydir)#g' \ + -e 's#[@]varlibdir_POST@#$(varlibdir)#g' \ $< > $@.tmp; then \ mv "$@.tmp" "$@"; \ else \ diff --git a/collectors/Makefile.am b/collectors/Makefile.am index 4ecd1f176..bb4d5c61d 100644 --- a/collectors/Makefile.am +++ b/collectors/Makefile.am @@ -8,6 +8,7 @@ SUBDIRS = \ cgroups.plugin \ charts.d.plugin \ checks.plugin \ + cups.plugin \ diskspace.plugin \ fping.plugin \ freebsd.plugin \ diff --git a/collectors/Makefile.in b/collectors/Makefile.in deleted file mode 100644 index 357f69d7a..000000000 --- a/collectors/Makefile.in +++ /dev/null @@ -1,663 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - 
esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = 
@OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - plugins.d \ - apps.plugin \ - cgroups.plugin \ - charts.d.plugin \ - checks.plugin \ - diskspace.plugin \ - fping.plugin \ - freebsd.plugin \ - freeipmi.plugin \ - idlejitter.plugin \ - macos.plugin \ - nfacct.plugin \ - node.d.plugin \ - proc.plugin \ - python.d.plugin \ - statsd.plugin \ - tc.plugin \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/Makefile -.PRECIOUS: Makefile -Makefile: 
$(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/README.md b/collectors/README.md index 83c92d9dd..d0393dae2 100644 --- a/collectors/README.md +++ b/collectors/README.md @@ -1,4 +1,4 @@ -# Data Collection Plugins +# Data collection plugins netdata supports **internal** and **external** data collection plugins: @@ -27,6 +27,7 @@ plugin|lang|O/S|runs as|modular|description [cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems [charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+. [checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled) +[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS** [diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points [fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points. [freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems @@ -116,3 +117,5 @@ The best way to find your way through this, is to examine what other similar plu **External plugins** use the API and are managed by [plugins.d](plugins.d/). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/all.h b/collectors/all.h index aa19bd5bd..7817d89bf 100644 --- a/collectors/all.h +++ b/collectors/all.h @@ -69,6 +69,7 @@ #define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core #define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only #define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only +#define NETDATA_CHART_PRIO_CPUIDLE 6000 #define NETDATA_CHART_PRIO_CORE_THROTTLING 5001 #define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002 @@ -297,6 +298,22 @@ #define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753 #define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754 +// MDSTAT + +#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 9000 +#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 9001 +#define NETDATA_CHART_PRIO_MDSTAT_DISKS 9002 // 5 charts per raid +#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 9003 +#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 9004 +#define NETDATA_CHART_PRIO_MDSTAT_FINISH 9005 +#define NETDATA_CHART_PRIO_MDSTAT_SPEED 9006 + +// Linux Power Supply +#define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply +#define NETDATA_CHART_PRIO_POWER_SUPPLY_CHARGE 9501 +#define NETDATA_CHART_PRIO_POWER_SUPPLY_ENERGY 9502 +#define NETDATA_CHART_PRIO_POWER_SUPPLY_VOLTAGE 9503 + // CGROUPS #define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts diff --git a/collectors/apps.plugin/Makefile.in b/collectors/apps.plugin/Makefile.in deleted file mode 100644 index 38120c048..000000000 --- a/collectors/apps.plugin/Makefile.in +++ /dev/null @@ -1,521 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/apps.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo 
"$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(libconfigdir)" -DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ 
-abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -dist_libconfig_DATA = \ - apps_groups.conf \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/apps.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(libconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_libconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_libconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_libconfigDATA install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-dist_libconfigDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md index d1ca8114c..ee5c6971a 100644 --- a/collectors/apps.plugin/README.md +++ b/collectors/apps.plugin/README.md @@ -77,7 +77,7 @@ To do this, edit `/etc/netdata/netdata.conf` and find this section: ``` [plugin:apps] # update every = 1 - # command options = + # command options = ``` Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `, @@ -100,7 +100,8 @@ For the **Applications** section, only groups configured in this file are report All other processes will be reported as `other`. For each process given, its whole process tree will be grouped, not just the process matched. -The plugin will include both parents and children. +The plugin will include both parents and children. If including the parents into the group is +undesirable, the line `other: *` should be appended to the `apps_groups.conf`. The process names are the ones returned by: @@ -254,7 +255,7 @@ Exactly like `top`, `htop` is providing an incomplete breakdown of the system CP ``` CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running - Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90 + Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90 Swp[ 0K/0K] Uptime: 3 days, 21:37:03 PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command @@ -305,10 +306,10 @@ MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18 SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08 idle: 0.0% free: 757M free: 0 15 min: 1.00 -NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth +NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth eth0 168b 2Kb -eth1 0b 0b CPU% MEM% PID USER NI S Command -lo 0b 0b 13.5 0.4 12789 root 0 S -bash +eth1 0b 0b CPU% MEM% PID USER NI S Command +lo 0b 0b 13.5 0.4 12789 root 0 S -bash 1.6 2.2 7025 root 0 R /usr/bin/python /u DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0 vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda @@ -370,3 +371,5 @@ It is even trickier, because walking through the entire process tree takes some if you sum the CPU utilization of all processes, you might have more CPU time than the reported total cpu time of the system. netdata solves this, by adapting the per process cpu utilization to the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin). 
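For readers who want that normalization made concrete, the following is a minimal sketch of the ratio logic described above (names simplified; the real normalize_utilization() in apps_plugin.c, shown later in this patch, also tracks guest time and keeps separate ratios for the resources of exited children):

```
#include <stdint.h>

/* illustrative only: scale per-process CPU time so that the sum over all
 * processes never exceeds the total CPU time reported by the system */
static double cpu_fix_ratio(uint64_t global_time, uint64_t collected_time) {
    if(!collected_time) return 0.0;               /* nothing collected to scale */
    if(global_time >= collected_time) return 1.0; /* everything collected fits  */
    return (double)global_time / (double)collected_time; /* scale down */
}
```

Each per-process value is multiplied by the resulting ratio before being sent to netdata; the `netdata.apps_fix` chart added by the plugin documents these ratios.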
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fapps.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf index c0d22fac9..91206410f 100644 --- a/collectors/apps.plugin/apps_groups.conf +++ b/collectors/apps.plugin/apps_groups.conf @@ -79,6 +79,7 @@ node.d.plugin: *node.d.plugin* python.d.plugin: *python.d.plugin* tc-qos-helper: *tc-qos-helper.sh* fping: fping +go.d.plugin: *go.d.plugin* # ----------------------------------------------------------------------------- # authentication/authorization related servers @@ -256,7 +257,8 @@ airflow: *airflow* # X X: X Xorg xinit lightdm xdm pulseaudio gkrellm xfwm4 xfdesktop xfce* Thunar -X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* kdm slim +X: xfsettingsd xfconfd gnome-* gdm gconf* dconf* xfconf* *gvfs gvfs* slim +X: kdeinit* kdm plasmashell X: evolution-* firefox chromium opera vivaldi-bin epiphany WebKit* X: '*systemd --user*' chrome *chrome-sandbox* *google-chrome* *chromium* *firefox* @@ -284,3 +286,4 @@ java: java ipfs: ipfs node: node +factorio: factorio diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index f592e9fc8..9f392679d 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -15,6 +15,12 @@ void netdata_cleanup_and_exit(int ret) { exit(ret); } +void send_statistics( const char *action, const char *action_result, const char *action_data) { + (void) action; + (void) action_result; + (void) action_data; + return; +} // callbacks required by popen() void signals_block(void) {}; void signals_unblock(void) {}; @@ -99,6 +105,9 @@ static inline void debug_log_dummy(void) {} // etc. #define RATES_DETAIL 10000ULL +// ---------------------------------------------------------------------------- +// factor for calculating correct CPU time values depending on units of raw data +static unsigned int time_factor = 0; // ---------------------------------------------------------------------------- // to avoid reallocating too frequently, we can increase the number of spare @@ -107,7 +116,6 @@ static inline void debug_log_dummy(void) {} // having a lot of spares, increases the CPU utilization of the plugin. #define MAX_SPARE_FDS 1 - // ---------------------------------------------------------------------------- // command line options @@ -166,12 +174,10 @@ static size_t // metric. // the total system time, as reported by /proc/stat -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) static kernel_uint_t global_utime = 0, global_stime = 0, global_gtime = 0; -#endif // the normalization ratios, as calculated by normalize_utilization() double utime_fix_ratio = 1.0, @@ -1038,8 +1044,8 @@ static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) { p->uid = proc_info->ki_uid; p->gid = proc_info->ki_groups[0]; - p->status_vmsize = proc_info->ki_size / 1024; // in kB - p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in kB + p->status_vmsize = proc_info->ki_size / 1024; // in KiB + p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB // TODO: what about shared and swap memory on FreeBSD? 
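// (illustrative aside, not part of the patch) the conversions above follow
// from the FreeBSD kinfo_proc fields: ki_size is the process virtual size
// in bytes and ki_rssize is the resident set size in pages, so, assuming
// pagesize was obtained via getpagesize():
//   status_vmsize = ki_size / 1024;              // bytes -> KiB
//   status_vmrss  = ki_rssize * pagesize / 1024; // pages -> bytes -> KiB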
return 1; #else @@ -1327,8 +1333,8 @@ cleanup: #endif } -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) -static inline int read_proc_stat() { +#ifndef __FreeBSD__ +static inline int read_global_time() { static char filename[FILENAME_MAX + 1] = ""; static procfile *ff = NULL; static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0; @@ -1386,10 +1392,50 @@ cleanup: return 0; } #else -static inline int read_proc_stat() { +static inline int read_global_time() { + static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, ntime_raw = 0; + static usec_t collected_usec = 0, last_collected_usec = 0; + long cp_time[CPUSTATES]; + + if (unlikely(CPUSTATES != 5)) { + goto cleanup; + } else { + static int mib[2] = {0, 0}; + + if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) { + goto cleanup; + } + } + + last_collected_usec = collected_usec; + collected_usec = now_monotonic_usec(); + + calls_counter++; + + // temporary - it is added global_ntime; + kernel_uint_t global_ntime = 0; + + incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec); + incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec); + incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec); + + global_utime += global_ntime; + + if(unlikely(global_iterations_counter == 1)) { + global_utime = 0; + global_stime = 0; + global_gtime = 0; + } + + return 1; + +cleanup: + global_utime = 0; + global_stime = 0; + global_gtime = 0; return 0; } -#endif +#endif /* !__FreeBSD__ */ // ---------------------------------------------------------------------------- @@ -2289,7 +2335,7 @@ static int collect_data_for_all_processes(void) { size_t new_procbase_size; - int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; + int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL }; if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { error("sysctl error: Can't get processes data size"); return 0; @@ -2396,7 +2442,7 @@ static int collect_data_for_all_processes(void) { return 0; // we need /proc/stat to normalize the cpu consumption of the exited childs - read_proc_stat(); + read_global_time(); // build the process tree link_all_processes_to_their_parents(); @@ -2884,7 +2930,6 @@ void send_resource_usage_to_netdata(usec_t dt) { , update_every ); -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) fprintf(stdout, "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n" "DIMENSION utime '' absolute 1 %2$llu\n" @@ -2907,7 +2952,6 @@ void send_resource_usage_to_netdata(usec_t dt) { , update_every , RATES_DETAIL ); -#endif } @@ -2942,7 +2986,6 @@ void send_resource_usage_to_netdata(usec_t dt) { , targets_assignment_counter ); -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) fprintf(stdout, "BEGIN netdata.apps_fix %llu\n" "SET utime = %u\n" @@ -2975,10 +3018,8 @@ void send_resource_usage_to_netdata(usec_t dt) { , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL) , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL) ); -#endif } -#if (ALL_PIDS_ARE_READ_INSTANTLY == 0) static void normalize_utilization(struct target *root) { struct target *w; @@ -2986,7 +3027,7 @@ static void normalize_utilization(struct target *root) { // here we try to eliminate them by disabling childs processing either for specific dimensions // or entirely. Of course, either way, we disable it just a single iteration. 
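// (illustrative aside, not part of the patch) the clamp that follows bounds
// each global counter by the theoretical maximum: all 'processors' cores
// busy 100% of the time. 'time_factor' is the number of raw CPU-time units
// per second (system_hz clock ticks on Linux, 1000000ULL / RATES_DETAIL on
// FreeBSD, per the time_factor initialization in main() later in this
// patch), so anything above max_time is measurement noise and is capped.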
- kernel_uint_t max_time = processors * system_hz * RATES_DETAIL; + kernel_uint_t max_time = processors * time_factor * RATES_DETAIL; kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0; if(global_utime > max_time) global_utime = max_time; @@ -3009,7 +3050,7 @@ static void normalize_utilization(struct target *root) { cmajflt += w->cmajflt; } - if((global_utime || global_stime || global_gtime) && (utime || stime || gtime)) { + if(global_utime || global_stime || global_gtime) { if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) { // everything we collected fits utime_fix_ratio = @@ -3019,7 +3060,7 @@ static void normalize_utilization(struct target *root) { cstime_fix_ratio = cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime); } - else if(global_utime + global_stime > utime + stime) { + else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) { // childrens resources are too high // lower only the children resources utime_fix_ratio = @@ -3029,7 +3070,7 @@ static void normalize_utilization(struct target *root) { cstime_fix_ratio = cgtime_fix_ratio = (double)((global_utime + global_stime) - (utime + stime)) / (double)(cutime + cstime); } - else { + else if(utime || stime) { // even running processes are unrealistic // zero the children resources // lower the running processes resources @@ -3040,6 +3081,14 @@ static void normalize_utilization(struct target *root) { cstime_fix_ratio = cgtime_fix_ratio = 0.0; } + else { + utime_fix_ratio = + stime_fix_ratio = + gtime_fix_ratio = + cutime_fix_ratio = + cstime_fix_ratio = + cgtime_fix_ratio = 0.0; + } } else { utime_fix_ratio = @@ -3121,11 +3170,6 @@ static void normalize_utilization(struct target *root) { , (kernel_uint_t)(cgtime * cgtime_fix_ratio) ); } -#else // ALL_PIDS_ARE_READ_INSTANTLY == 1 -static void normalize_utilization(struct target *root) { - (void)root; -} -#endif // ALL_PIDS_ARE_READ_INSTANTLY static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) { struct target *w; @@ -3196,7 +3240,7 @@ static void send_collected_data_to_netdata(struct target *root, const char *type } send_END(); #endif - + send_BEGIN(type, "minor_faults", dt); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) @@ -3290,19 +3334,19 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type // we have something new to show // update the charts - fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); + fprintf(stdout, "CHART %s.cpu '' '%s CPU Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu stacked 20001 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, system_hz * RATES_DETAIL / 100, w->hidden ? "hidden" : ""); + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu %s\n", w->name, time_factor * RATES_DETAIL / 100, w->hidden ? 
"hidden" : ""); } - fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.mem '' '%s Real Memory (w/o shared)' 'MiB' mem %s.mem stacked 20003 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); } - fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.vmem '' '%s Virtual Memory Size' 'MiB' mem %s.vmem stacked 20005 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); @@ -3320,28 +3364,28 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); } - fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); + fprintf(stdout, "CHART %s.cpu_user '' '%s CPU User Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_user stacked 20020 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); } - fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); + fprintf(stdout, "CHART %s.cpu_system '' '%s CPU System Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20021 %d\n", type, title, (processors * 100), processors, (processors>1)?"s":"", type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); } if(show_guest_time) { - fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'cpu time %%' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? "s" : "", type, update_every); + fprintf(stdout, "CHART %s.cpu_guest '' '%s CPU Guest Time (%d%% = %d core%s)' 'percentage' cpu %s.cpu_system stacked 20022 %d\n", type, title, (processors * 100), processors, (processors > 1) ? 
"s" : "", type, update_every); for (w = root; w; w = w->next) { if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, system_hz * RATES_DETAIL / 100LLU); + fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, time_factor * RATES_DETAIL / 100LLU); } } #ifndef __FreeBSD__ - fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.swap '' '%s Swap Memory' 'MiB' swap %s.swap stacked 20011 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute %ld %ld\n", w->name, 1L, 1024L); @@ -3373,25 +3417,25 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, RATES_DETAIL); } #else - fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'kilobytes/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.preads '' '%s Disk Reads' 'KiB/s' disk %s.preads stacked 20002 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); } - fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'kilobytes/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.pwrites '' '%s Disk Writes' 'KiB/s' disk %s.pwrites stacked 20002 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); } - fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'kilobytes/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.lreads '' '%s Disk Logical Reads' 'KiB/s' disk %s.lreads stacked 20042 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); } - fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'kilobytes/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every); + fprintf(stdout, "CHART %s.lwrites '' '%s I/O Logical Writes' 'KiB/s' disk %s.lwrites stacked 20042 %d\n", type, title, type, update_every); for (w = root; w ; w = w->next) { if(unlikely(w->exposed)) fprintf(stdout, "DIMENSION %s '' absolute 1 %llu\n", w->name, 1024LLU * RATES_DETAIL); @@ -3710,7 +3754,14 @@ int main(int argc, char **argv) { procfile_adaptive_initial_allocation = 1; time_t started_t = now_monotonic_sec(); + get_system_HZ(); +#ifdef __FreeBSD__ + time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs +#else + time_factor = system_hz; // Linux uses clock ticks +#endif + get_system_pid_max(); get_system_cpus(); diff --git a/collectors/cgroups.plugin/Makefile.in b/collectors/cgroups.plugin/Makefile.in deleted file mode 100644 index 49c3c9834..000000000 --- a/collectors/cgroups.plugin/Makefile.in +++ /dev/null @@ -1,563 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ - $(dist_noinst_DATA) -subdir = collectors/cgroups.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo 
"$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = 
@PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - cgroup-name.sh \ - $(NULL) - -SUFFIXES = .in -dist_plugins_SCRIPTS = \ - cgroup-name.sh \ - cgroup-network-helper.sh \ - $(NULL) - -dist_noinst_DATA = \ - cgroup-name.sh.in \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/cgroups.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_pluginsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_pluginsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_pluginsSCRIPTS install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-dist_pluginsSCRIPTS - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell 
versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md index 47eeebc53..d4f6d8ce0 100644 --- a/collectors/cgroups.plugin/README.md +++ b/collectors/cgroups.plugin/README.md @@ -32,7 +32,7 @@ Linux exposes resource usage reporting and provides dynamic configuration for cg path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices -``` +``` netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds. @@ -51,7 +51,7 @@ To provide a sane default for this setting, netdata uses the following pattern l ``` [plugin:cgroups] - search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice * + search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice * ``` So, we disable checking for **child cgroups** in systemd internal cgroups ([systemd services are monitored by netdata](#monitoring-systemd-services)), user cgroups (normally used for desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All others are enabled. @@ -70,7 +70,7 @@ To provide a sane default, netdata uses the following pattern list (it checks th ``` [plugin:cgroups] - enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user * + enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user * ``` The above provides the default `yes` or `no` setting for the cgroup. However, there is an additional step. In many cases the cgroups found in the `/sys/fs/cgroup` hierarchy are just random numbers and in many cases these numbers are ephemeral: they change across reboots or sessions. @@ -158,6 +158,13 @@ cgroup_enable=memory swapaccount=1 You can add the above, directly at the `linux` line in your `/boot/grub/grub.cfg` or appending them to the `GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On DigitalOcean debian images you may have to set it at `/etc/default/grub.d/50-cloudimg-settings.cfg`. +Which systemd services are monitored by netdata is determined by the following pattern list: + +``` +[plugin:cgroups] + cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service +``` + --- ## Monitoring ephemeral containers @@ -185,3 +192,5 @@ So, when a network interface or container stops, netdata might log a few errors 6. obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable with `[global].cleanup obsolete charts after seconds = 3600` (at netdata.conf). 7. 
when obsolete charts are removed from memory they are also deleted from disk (configurable with `[global].delete obsolete charts files = yes`) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcgroups.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh deleted file mode 100644 index 6bf8b8b03..000000000 --- a/collectors/cgroups.plugin/cgroup-name.sh +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2016 Costa Tsaousis -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Script to find a better name for cgroups -# - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- - -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" - -DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}" -CGROUP="${1}" -NAME= - -# ----------------------------------------------------------------------------- - -if [ -z "${CGROUP}" ] - then - fatal "called without a cgroup name. Nothing to do." -fi - -for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf" -do - if [ -f "${CONFIG}" ] - then - NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)" - if [ -z "${NAME}" ] - then - info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." - else - break - fi - #else - # info "configuration file '${CONFIG}' is not available." - fi -done - -function docker_get_name_classic { - local id="${1}" - info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" - NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )" - return 0 -} - -function docker_get_name_api { - local id="${1}" - if [ ! 
-S "${DOCKER_HOST}" ] - then - warning "Can't find ${DOCKER_HOST}" - return 1 - fi - info "Running API command: /containers/${id}/json" - JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*') - NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') - return 0 -} - -function docker_get_name { - local id="${1}" - if hash docker 2>/dev/null - then - docker_get_name_classic "${id}" - else - docker_get_name_api "${id}" || docker_get_name_classic "${id}" - fi - if [ -z "${NAME}" ] - then - warning "cannot find the name of docker container '${id}'" - NAME="${id:0:12}" - else - info "docker container '${id}' is named '${NAME}'" - fi -} - -if [ -z "${NAME}" ] - then - if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]] - then - # docker containers - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." - fi - elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]] - then - # kubernetes - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'." - fi - elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]] - then - # systemd-nspawn - - NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" - - elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]] - then - # libvirtd / qemu virtual machines - - # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" - NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" - - elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]] - then - # libvirtd / qemu virtual machines - NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" - - elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]] - then - # Proxmox VMs - - FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" - if [[ -f $FILENAME && -r $FILENAME ]] - then - NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." - fi - elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]] - then - # Proxmox Containers (LXC) - - FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" - if [[ -f ${FILENAME} && -r ${FILENAME} ]] - then - NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." 
- fi - fi - - [ -z "${NAME}" ] && NAME="${CGROUP}" - [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" -fi - -info "cgroup '${CGROUP}' is called '${NAME}'" -echo "${NAME}" diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in index 53696a4bf..3aebe2bf4 100755 --- a/collectors/cgroups.plugin/cgroup-name.sh.in +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -1,4 +1,5 @@ #!/usr/bin/env bash +#shellcheck disable=SC2001 # netdata # real-time performance and health monitoring, done right! @@ -16,42 +17,80 @@ export LC_ALL=C PROGRAM_NAME="$(basename "${0}")" logdate() { - date "+%Y-%m-%d %H:%M:%S" + date "+%Y-%m-%d %H:%M:%S" } log() { - local status="${1}" - shift + local status="${1}" + shift - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" } warning() { - log WARNING "${@}" + log WARNING "${@}" } error() { - log ERROR "${@}" + log ERROR "${@}" } info() { - log INFO "${@}" + log INFO "${@}" } fatal() { - log FATAL "${@}" - exit 1 + log FATAL "${@}" + exit 1 } -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" +function docker_get_name_classic() { + local id="${1}" + info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" + NAME="$(docker ps --filter=id="${id}" --format="{{.Names}}")" + return 0 +} + +function docker_get_name_api() { + local id="${1}" + if [ ! -S "${DOCKER_HOST}" ]; then + warning "Can't find ${DOCKER_HOST}" + return 1 + fi + info "Running API command: /containers/${id}/json" + JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\\r\\n" | nc -U "${DOCKER_HOST}" | grep '^{.*') + NAME=$(echo "$JSON" | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') + return 0 +} + +function docker_get_name() { + local id="${1}" + if hash docker 2>/dev/null; then + docker_get_name_classic "${id}" + else + docker_get_name_api "${id}" || docker_get_name_classic "${id}" + fi + if [ -z "${NAME}" ]; then + warning "cannot find the name of docker container '${id}'" + NAME="${id:0:12}" + else + info "docker container '${id}' is named '${NAME}'" + fi +} + +function docker_validate_id() { + local id="${1}" + if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then + docker_get_name "${id}" + else + error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." + fi } # ----------------------------------------------------------------------------- -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" [ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}" @@ -60,136 +99,77 @@ NAME= # ----------------------------------------------------------------------------- -if [ -z "${CGROUP}" ] - then - fatal "called without a cgroup name. Nothing to do." +if [ -z "${CGROUP}" ]; then + fatal "called without a cgroup name. Nothing to do." fi -for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf" -do - if [ -f "${CONFIG}" ] - then - NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed "s/[[:space:]]\+/ /g" | cut -d ' ' -f 2)" - if [ -z "${NAME}" ] - then - info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." - else - break - fi - #else - # info "configuration file '${CONFIG}' is not available." 
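
The `cgroups-names.conf` loop shown above (old indentation on the `-` side, the reindented `; do`/`; then` style on the `+` side) expects one mapping per line in the form `<cgroup-id> <friendly-name>`, whitespace separated. A condensed sketch of that lookup, with a made-up config path and cgroup id:

```bash
#!/usr/bin/env bash
# Minimal sketch of the cgroups-names.conf resolution performed above.
# One "<cgroup> <name>" pair per line; sample values are illustrative.
CONFIG="/etc/netdata/cgroups-names.conf"
CGROUP="docker_0123456789ab"
if [ -f "${CONFIG}" ]; then
    # squeeze whitespace runs, then take the second field of the matching line
    NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)"
fi
echo "${NAME:-${CGROUP}}"   # fall back to the raw cgroup id, as the script does
```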
- fi +for CONFIG in "${NETDATA_USER_CONFIG_DIR}/cgroups-names.conf" "${NETDATA_STOCK_CONFIG_DIR}/cgroups-names.conf"; do + if [ -f "${CONFIG}" ]; then + NAME="$(grep "^${CGROUP} " "${CONFIG}" | sed 's/[[:space:]]\+/ /g' | cut -d ' ' -f 2)" + if [ -z "${NAME}" ]; then + info "cannot find cgroup '${CGROUP}' in '${CONFIG}'." + else + break + fi + #else + # info "configuration file '${CONFIG}' is not available." + fi done -function docker_get_name_classic { - local id="${1}" - info "Running command: docker ps --filter=id=\"${id}\" --format=\"{{.Names}}\"" - NAME="$( docker ps --filter=id="${id}" --format="{{.Names}}" )" - return 0 -} - -function docker_get_name_api { - local id="${1}" - if [ ! -S "${DOCKER_HOST}" ] - then - warning "Can't find ${DOCKER_HOST}" - return 1 - fi - info "Running API command: /containers/${id}/json" - JSON=$(echo -e "GET /containers/${id}/json HTTP/1.0\r\n" | nc -U ${DOCKER_HOST} | grep '^{.*') - NAME=$(echo $JSON | jq -r .Name,.Config.Hostname | grep -v null | head -n1 | sed 's|^/||') - return 0 -} - -function docker_get_name { - local id="${1}" - if hash docker 2>/dev/null - then - docker_get_name_classic "${id}" - else - docker_get_name_api "${id}" || docker_get_name_classic "${id}" - fi - if [ -z "${NAME}" ] - then - warning "cannot find the name of docker container '${id}'" - NAME="${id:0:12}" - else - info "docker container '${id}' is named '${NAME}'" - fi -} - -if [ -z "${NAME}" ] - then - if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]] - then - # docker containers - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from docker cgroup '${CGROUP}'." - fi - elif [[ "${CGROUP}" =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]] - then - # kubernetes - - DOCKERID="$( echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|" )" - # echo "DOCKERID=${DOCKERID}" - - if [ ! -z "${DOCKERID}" -a \( ${#DOCKERID} -eq 64 -o ${#DOCKERID} -eq 12 \) ] - then - docker_get_name "${DOCKERID}" - else - error "a docker id cannot be extracted from kubernetes cgroup '${CGROUP}'." - fi - elif [[ "${CGROUP}" =~ machine.slice[_/].*\.service ]] - then - # systemd-nspawn - - NAME="$(echo ${CGROUP} | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" - - elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]] - then - # libvirtd / qemu virtual machines - - # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" - NAME="qemu_$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" - - elif [[ "${CGROUP}" =~ machine_.*\.libvirt-qemu ]] - then - # libvirtd / qemu virtual machines - NAME="qemu_$(echo ${CGROUP} | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" - - elif [[ "${CGROUP}" =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]] - then - # Proxmox VMs - - FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" - if [[ -f $FILENAME && -r $FILENAME ]] - then - NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." 
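
Note the shape of the validity tests being deleted above: `[ ! -z "${DOCKERID}" -a \( ... -o ... \) ]`. POSIX marks `test`'s `-a`/`-o` operators as obsolescent because their parsing is ambiguous, which is what the new `docker_validate_id()` avoids by chaining separate `[ ]` commands. A side-by-side illustration (the 64-character id is made up):

```bash
#!/usr/bin/env bash
# Why the rewrite drops `test ... -a ... -o ...`: shellcheck (SC2166)
# recommends chaining [ ] with && and || instead. Sample id is made up.
id="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

# old, obsolescent form (works here, but parsing is fragile):
[ ! -z "${id}" -a \( ${#id} -eq 64 -o ${#id} -eq 12 \) ] && echo "old form: valid"

# new, unambiguous form used by docker_validate_id():
if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
    echo "new form: valid docker id"
fi
```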
- fi - elif [[ "${CGROUP}" =~ lxc_([0-9]+) && -d /etc/pve ]] - then - # Proxmox Containers (LXC) - - FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" - if [[ -f ${FILENAME} && -r ${FILENAME} ]] - then - NAME=$(grep -e '^hostname: ' /etc/pve/lxc/${BASH_REMATCH[1]}.conf | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') - else - error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." - fi - fi - - [ -z "${NAME}" ] && NAME="${CGROUP}" - [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" +if [ -z "${NAME}" ]; then + if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then + # docker containers + #shellcheck disable=SC1117 + DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")" + docker_validate_id "${DOCKERID}" + + elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then + # ECS + #shellcheck disable=SC1117 + DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")" + docker_validate_id "${DOCKERID}" + + elif [[ ${CGROUP} =~ ^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]+[_/][a-fA-F0-9]+$ ]]; then + # kubernetes + #shellcheck disable=SC1117 + DOCKERID="$(echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|")" + docker_validate_id "${DOCKERID}" + + elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then + # systemd-nspawn + NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')" + + elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then + # libvirtd / qemu virtual machines + # NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')" + NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/\/x2d[[:digit:]]*//; s/\/x2d//g; s/\.scope//g')" + + elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then + # libvirtd / qemu virtual machines + NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')" + + elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then + # Proxmox VMs + + FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" + if [[ -f $FILENAME && -r $FILENAME ]]; then + NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')" + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." + fi + elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then + # Proxmox Containers (LXC) + + FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf" + if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then + NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p') + else + error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group." 
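
The new ECS branch added above follows the same shape as the docker and kubernetes ones: reduce the cgroup path to the 64- or 12-character container id with `sed`, then hand it to `docker_validate_id`. A quick illustration of the kubernetes extraction, using the sed expression from the script on a hypothetical cgroup path:

```bash
#!/usr/bin/env bash
# Illustration of the kubepods extraction above; the sample path is
# hypothetical, the sed expression is the one the script uses.
CGROUP="kubepods_burstable_pod1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d_fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210"
#shellcheck disable=SC1117
DOCKERID="$(echo "${CGROUP}" | sed "s|^.*kubepods[_/].*[_/]pod[a-fA-F0-9-]\+[_/]\([a-fA-F0-9]\+\)$|\1|")"
echo "${DOCKERID}"   # 64 hex chars -> passes the length check in docker_validate_id()
```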
+ fi + fi + + [ -z "${NAME}" ] && NAME="${CGROUP}" + [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}" fi info "cgroup '${CGROUP}' is called '${NAME}'" diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c index 0cf2a2633..5aeb9a59a 100644 --- a/collectors/cgroups.plugin/cgroup-network.c +++ b/collectors/cgroups.plugin/cgroup-network.c @@ -24,6 +24,13 @@ void netdata_cleanup_and_exit(int ret) { exit(ret); } +void send_statistics( const char *action, const char *action_result, const char *action_data) { + (void) action; + (void) action_result; + (void) action_data; + return; +} + // callbacks required by popen() void signals_block(void) {}; void signals_unblock(void) {}; diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 9c0fd7f43..f8e5167ff 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -1489,7 +1489,7 @@ void update_systemd_services_charts( , "services.mem_usage" , (cgroup_used_memory_without_cache) ? "Systemd Services Used Memory without Cache" : "Systemd Services Used Memory" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10 @@ -1512,7 +1512,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_rss" , "Systemd Services RSS Memory" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20 @@ -1533,7 +1533,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_mapped" , "Systemd Services Mapped Memory" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30 @@ -1554,7 +1554,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_cache" , "Systemd Services Cache Memory" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40 @@ -1575,7 +1575,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_writeback" , "Systemd Services Writeback Memory" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50 @@ -1596,7 +1596,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_pgfault" , "Systemd Services Memory Minor Page Faults" - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60 @@ -1616,7 +1616,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_pgmajfault" , "Systemd Services Memory Major Page Faults" - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70 @@ -1637,7 +1637,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_pgpgin" , "Systemd Services Memory Charging Activity" - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80 @@ -1658,7 +1658,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_pgpgout" , "Systemd Services Memory Uncharging Activity" - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90 @@ -1681,7 +1681,7 @@ void update_systemd_services_charts( , "mem" , "services.mem_failcnt" , "Systemd Services Memory Limit Failures" - , "MB" + , "failures" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , 
NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110 @@ -1704,7 +1704,7 @@ void update_systemd_services_charts( , "swap" , "services.swap_usage" , "Systemd Services Swap Memory Used" - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100 @@ -1727,7 +1727,7 @@ void update_systemd_services_charts( , "disk" , "services.io_read" , "Systemd Services Disk Read Bandwidth" - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120 @@ -1748,7 +1748,7 @@ void update_systemd_services_charts( , "disk" , "services.io_write" , "Systemd Services Disk Write Bandwidth" - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130 @@ -1815,7 +1815,7 @@ void update_systemd_services_charts( , "disk" , "services.throttle_io_read" , "Systemd Services Throttle Disk Read Bandwidth" - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160 @@ -1836,7 +1836,7 @@ void update_systemd_services_charts( , "disk" , "services.throttle_io_write" , "Systemd Services Throttle Disk Write Bandwidth" - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170 @@ -2317,7 +2317,7 @@ void update_cgroup_charts(int update_every) { , "mem" , "cgroup.mem" , title - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 210 @@ -2357,7 +2357,7 @@ void update_cgroup_charts(int update_every) { , "mem" , "cgroup.writeback" , title - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 300 @@ -2389,7 +2389,7 @@ void update_cgroup_charts(int update_every) { , "mem" , "cgroup.mem_activity" , title - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 400 @@ -2417,7 +2417,7 @@ void update_cgroup_charts(int update_every) { , "mem" , "cgroup.pgfaults" , title - , "MB/s" + , "MiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 500 @@ -2447,7 +2447,7 @@ void update_cgroup_charts(int update_every) { , "mem" , "cgroup.mem_usage" , title - , "MB" + , "MiB" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 200 @@ -2505,7 +2505,7 @@ void update_cgroup_charts(int update_every) { , "disk" , "cgroup.io" , title - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 @@ -2565,7 +2565,7 @@ void update_cgroup_charts(int update_every) { , "disk" , "cgroup.throttle_io" , title - , "KB/s" + , "KiB/s" , PLUGIN_CGROUPS_NAME , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME , NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 1200 diff --git a/collectors/charts.d.plugin/.keep b/collectors/charts.d.plugin/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am index e2e00258f..2989b4b8d 100644 --- a/collectors/charts.d.plugin/Makefile.am +++ b/collectors/charts.d.plugin/Makefile.am @@ -32,12 +32,11 @@ dist_charts_DATA = \ userchartsconfigdir=$(configdir)/charts.d dist_userchartsconfig_DATA = \ - $(top_srcdir)/installer/.keep \ + .keep \ $(NULL) chartsconfigdir=$(libconfigdir)/charts.d dist_chartsconfig_DATA = \ - 
$(top_srcdir)/installer/.keep \ $(NULL) include ap/Makefile.inc diff --git a/collectors/charts.d.plugin/Makefile.in b/collectors/charts.d.plugin/Makefile.in deleted file mode 100644 index 23e2edebb..000000000 --- a/collectors/charts.d.plugin/Makefile.in +++ /dev/null @@ -1,953 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS 
PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc \ - $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc \ - $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \ - $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \ - $(srcdir)/hddtemp/Makefile.inc \ - $(srcdir)/libreswan/Makefile.inc \ - 
$(srcdir)/load_average/Makefile.inc \ - $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc \ - $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc \ - $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \ - $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc \ - $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc \ - $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) \ - $(dist_charts_DATA) $(dist_chartsconfig_DATA) \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) \ - $(dist_userchartsconfig_DATA) -subdir = collectors/charts.d.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" \ - "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" \ - "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)" -SCRIPTS = $(dist_charts_SCRIPTS) $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_charts_DATA) $(dist_chartsconfig_DATA) \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) \ - $(dist_userchartsconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS 
= @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - charts.d.plugin \ - $(NULL) - -SUFFIXES = .in -dist_libconfig_DATA = \ - charts.d.conf \ - $(NULL) - -dist_plugins_SCRIPTS = \ - charts.d.dryrun-helper.sh \ - charts.d.plugin \ - loopsleepms.sh.inc \ - $(NULL) - - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution -dist_noinst_DATA = charts.d.plugin.in README.md $(NULL) ap/README.md \ - ap/Makefile.inc apache/README.md apache/Makefile.inc \ - 
apcupsd/README.md apcupsd/Makefile.inc cpu_apps/README.md \ - cpu_apps/Makefile.inc cpufreq/README.md cpufreq/Makefile.inc \ - example/README.md example/Makefile.inc exim/README.md \ - exim/Makefile.inc hddtemp/README.md hddtemp/Makefile.inc \ - libreswan/README.md libreswan/Makefile.inc \ - load_average/README.md load_average/Makefile.inc \ - mem_apps/README.md mem_apps/Makefile.inc mysql/README.md \ - mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \ - nut/README.md nut/Makefile.inc opensips/README.md \ - opensips/Makefile.inc phpfpm/README.md phpfpm/Makefile.inc \ - postfix/README.md postfix/Makefile.inc sensors/README.md \ - sensors/Makefile.inc squid/README.md squid/Makefile.inc \ - tomcat/README.md tomcat/Makefile.inc -dist_charts_SCRIPTS = \ - $(NULL) - - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files -dist_charts_DATA = $(NULL) ap/ap.chart.sh apache/apache.chart.sh \ - apcupsd/apcupsd.chart.sh cpu_apps/cpu_apps.chart.sh \ - cpufreq/cpufreq.chart.sh example/example.chart.sh \ - exim/exim.chart.sh hddtemp/hddtemp.chart.sh \ - libreswan/libreswan.chart.sh \ - load_average/load_average.chart.sh mem_apps/mem_apps.chart.sh \ - mysql/mysql.chart.sh nginx/nginx.chart.sh nut/nut.chart.sh \ - opensips/opensips.chart.sh phpfpm/phpfpm.chart.sh \ - postfix/postfix.chart.sh sensors/sensors.chart.sh \ - squid/squid.chart.sh tomcat/tomcat.chart.sh -userchartsconfigdir = $(configdir)/charts.d -dist_userchartsconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - -chartsconfigdir = $(libconfigdir)/charts.d -dist_chartsconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \ - ap/ap.conf apache/apache.conf apcupsd/apcupsd.conf \ - cpu_apps/cpu_apps.conf cpufreq/cpufreq.conf \ - example/example.conf exim/exim.conf hddtemp/hddtemp.conf \ - libreswan/libreswan.conf load_average/load_average.conf \ - mem_apps/mem_apps.conf mysql/mysql.conf nginx/nginx.conf \ - nut/nut.conf opensips/opensips.conf phpfpm/phpfpm.conf \ - postfix/postfix.conf sensors/sensors.conf squid/squid.conf \ - tomcat/tomcat.conf -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu 
collectors/charts.d.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/charts.d.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc $(srcdir)/ap/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/apcupsd/Makefile.inc $(srcdir)/cpu_apps/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/libreswan/Makefile.inc $(srcdir)/load_average/Makefile.inc $(srcdir)/mem_apps/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nut/Makefile.inc $(srcdir)/opensips/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_chartsSCRIPTS: $(dist_charts_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(chartsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(chartsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_chartsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_charts_SCRIPTS)'; test -n "$(chartsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' 
| sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -install-dist_chartsDATA: $(dist_charts_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsdir)" || exit $$?; \ - done - -uninstall-dist_chartsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_charts_DATA)'; test -n "$(chartsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(chartsdir)'; $(am__uninstall_files_from_dir) -install-dist_chartsconfigDATA: $(dist_chartsconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(chartsconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(chartsconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(chartsconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(chartsconfigdir)" || exit $$?; \ - done - -uninstall-dist_chartsconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_chartsconfig_DATA)'; test -n "$(chartsconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(chartsconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_userchartsconfigDATA: $(dist_userchartsconfig_DATA) - 
@$(NORMAL_INSTALL) - @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(userchartsconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(userchartsconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userchartsconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(userchartsconfigdir)" || exit $$?; \ - done - -uninstall-dist_userchartsconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_userchartsconfig_DATA)'; test -n "$(userchartsconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(userchartsconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(chartsdir)" "$(DESTDIR)$(chartsconfigdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(userchartsconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_chartsDATA install-dist_chartsSCRIPTS \ - install-dist_chartsconfigDATA install-dist_libconfigDATA \ - install-dist_pluginsSCRIPTS install-dist_userchartsconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \ - uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \ - uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_userchartsconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_chartsDATA \ - install-dist_chartsSCRIPTS install-dist_chartsconfigDATA \ - install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ - install-dist_userchartsconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_chartsDATA uninstall-dist_chartsSCRIPTS \ - uninstall-dist_chartsconfigDATA uninstall-dist_libconfigDATA \ - uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_userchartsconfigDATA - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md index b224bffe3..3d318f26c 100644 --- a/collectors/charts.d.plugin/README.md +++ b/collectors/charts.d.plugin/README.md @@ -191,3 +191,5 @@ This is what you need to do: Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are created in `/usr/libexec/netdata/plugins.d/`. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md index eb4e80707..962a8565e 100644 --- a/collectors/charts.d.plugin/ap/README.md +++ b/collectors/charts.d.plugin/ap/README.md @@ -82,3 +82,5 @@ To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf` ## Auto-detection The plugin is able to auto-detect if you are running access points on your linux box. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh index ccc36120c..a2d04c0a7 100644 --- a/collectors/charts.d.plugin/ap/ap.chart.sh +++ b/collectors/charts.d.plugin/ap/ap.chart.sh @@ -56,8 +56,7 @@ ap_check() { ap_create() { local ssid dev - for dev in "${!ap_devs[@]}" - do + for dev in "${!ap_devs[@]}"; do ssid="${ap_devs[${dev}]}" # create the chart with 3 dimensions @@ -99,14 +98,13 @@ ap_update() { # for each dimension # remember: KEEP IT SIMPLE AND SHORT - for dev in "${!ap_devs[@]}" - do - echo - echo "DEVICE ${dev}" + for dev in "${!ap_devs[@]}"; do + echo + echo "DEVICE ${dev}" iw "${dev}" station dump - done | awk " + done | awk ' function zero_data() { - dev = \"\"; + dev = ""; c = 0; rb = 0; tb = 0; @@ -121,32 +119,32 @@ ap_update() { e = 0; } function print_device() { - if(dev != \"\" && length(dev) > 0) { - print \"BEGIN ap_clients.\" dev; - print \"SET clients = \" c; - print \"END\"; - print \"BEGIN ap_bandwidth.\" dev; - print \"SET received = \" rb; - print \"SET sent = \" tb; - print \"END\"; - print \"BEGIN ap_packets.\" dev; - print \"SET received = \" rp; - print \"SET sent = \" tp; - print \"END\"; - print \"BEGIN ap_issues.\" dev; - print \"SET retries = \" tr; - print \"SET failures = \" tf; - print \"END\"; + if(dev != "" && length(dev) > 0) { + print "BEGIN ap_clients." dev; + print "SET clients = " c; + print "END"; + print "BEGIN ap_bandwidth." dev; + print "SET received = " rb; + print "SET sent = " tb; + print "END"; + print "BEGIN ap_packets." dev; + print "SET received = " rp; + print "SET sent = " tp; + print "END"; + print "BEGIN ap_issues." dev; + print "SET retries = " tr; + print "SET failures = " tf; + print "END"; if( c == 0 ) c = 1; - print \"BEGIN ap_signal.\" dev; - print \"SET signal = \" int(s / c); - print \"END\"; - print \"BEGIN ap_bitrate.\" dev; - print \"SET receive = \" int(rt / c); - print \"SET transmit = \" int(tt / c); - print \"SET expected = \" int(e / c); - print \"END\"; + print "BEGIN ap_signal." dev; + print "SET signal = " int(s / c); + print "END"; + print "BEGIN ap_bitrate." 
dev; + print "SET receive = " int(rt / c); + print "SET transmit = " int(tt / c); + print "SET expected = " int(e / c); + print "END"; } zero_data(); } @@ -155,28 +153,27 @@ ap_update() { } /^DEVICE / { print_device(); - dev = \$2; + dev = $2; } /^Station/ { c++; } - /^[ \\t]+rx bytes:/ { rb += \$3; } - /^[ \\t]+tx bytes:/ { tb += \$3; } - /^[ \\t]+rx packets:/ { rp += \$3; } - /^[ \\t]+tx packets:/ { tp += \$3; } - /^[ \\t]+tx retries:/ { tr += \$3; } - /^[ \\t]+tx failed:/ { tf += \$3; } - /^[ \\t]+signal:/ { x = \$2; s += x * 1000; } - /^[ \\t]+rx bitrate:/ { x = \$3; rt += x * 1000; } - /^[ \\t]+tx bitrate:/ { x = \$3; tt += x * 1000; } - /^[ \\t]+expected throughput:(.*)Mbps/ { - x=\$3; - sub(/Mbps/, \"\", x); + /^[ \t]+rx bytes:/ { rb += $3; } + /^[ \t]+tx bytes:/ { tb += $3; } + /^[ \t]+rx packets:/ { rp += $3; } + /^[ \t]+tx packets:/ { tp += $3; } + /^[ \t]+tx retries:/ { tr += $3; } + /^[ \t]+tx failed:/ { tf += $3; } + /^[ \t]+signal:/ { x = $2; s += x * 1000; } + /^[ \t]+rx bitrate:/ { x = $3; rt += x * 1000; } + /^[ \t]+tx bitrate:/ { x = $3; tt += x * 1000; } + /^[ \t]+expected throughput:(.*)Mbps/ { + x=$3; + sub(/Mbps/, "", x); e += x * 1000; } END { print_device(); } - " + ' return 0 } - diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md index 890cee984..273979107 100644 --- a/collectors/charts.d.plugin/apache/README.md +++ b/collectors/charts.d.plugin/apache/README.md @@ -1,10 +1,10 @@ +# Apache + > THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/apache) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT --- -# Apache Plugin (apache) - The `apache` collector visualizes key performance data for an apache web server. ## Example netdata charts @@ -125,3 +125,5 @@ curl "http://127.0.0.1:80/server-status?auto" netdata will be able to do it too. Notice: You may need to have the default `000-default.conf ` website enabled in order for the status mod to work. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/apache/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh index 95876432f..7d09ee676 100644 --- a/collectors/charts.d.plugin/apache/apache.chart.sh +++ b/collectors/charts.d.plugin/apache/apache.chart.sh @@ -52,21 +52,20 @@ apache_key_connsasynckeepalive= apache_key_connsasyncclosing= apache_detect() { local i=0 - for x in "${@}" - do + for x in "${@}"; do case "${x}" in - 'Total Accesses') apache_key_accesses=$((i + 1)) ;; - 'Total kBytes') apache_key_kbytes=$((i + 1)) ;; - 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;; - 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;; - 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;; - 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;; - 'IdleWorkers') apache_key_idleworkers=$((i + 1));; - 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;; - 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;; - 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;; - 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;; - 'Scoreboard') apache_key_scoreboard=$((i)) ;; + 'Total Accesses') apache_key_accesses=$((i + 1)) ;; + 'Total kBytes') apache_key_kbytes=$((i + 1)) ;; + 'ReqPerSec') apache_key_reqpersec=$((i + 1)) ;; + 'BytesPerSec') apache_key_bytespersec=$((i + 1)) ;; + 'BytesPerReq') apache_key_bytesperreq=$((i + 1)) ;; + 'BusyWorkers') apache_key_busyworkers=$((i + 1)) ;; + 'IdleWorkers') apache_key_idleworkers=$((i + 1)) ;; + 'ConnsTotal') apache_key_connstotal=$((i + 1)) ;; + 'ConnsAsyncWriting') apache_key_connsasyncwriting=$((i + 1)) ;; + 'ConnsAsyncKeepAlive') apache_key_connsasynckeepalive=$((i + 1)) ;; + 'ConnsAsyncClosing') apache_key_connsasyncclosing=$((i + 1)) ;; + 'Scoreboard') apache_key_scoreboard=$((i)) ;; esac i=$((i + 1)) @@ -74,20 +73,19 @@ apache_detect() { # we will not check of the Conns* # keys, since these are apache 2.4 specific - [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1 - [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1 - [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1 + [ -z "${apache_key_accesses}" ] && error "missing 'Total Accesses' from apache server: ${*}" && return 1 + [ -z "${apache_key_kbytes}" ] && error "missing 'Total kBytes' from apache server: ${*}" && return 1 + [ -z "${apache_key_reqpersec}" ] && error "missing 'ReqPerSec' from apache server: ${*}" && return 1 [ -z "${apache_key_bytespersec}" ] && error "missing 'BytesPerSec' from apache server: ${*}" && return 1 [ -z "${apache_key_bytesperreq}" ] && error "missing 'BytesPerReq' from apache server: ${*}" && return 1 [ -z "${apache_key_busyworkers}" ] && error "missing 'BusyWorkers' from apache server: ${*}" && return 1 [ -z "${apache_key_idleworkers}" ] && error "missing 'IdleWorkers' from apache server: ${*}" && return 1 - [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1 + [ -z "${apache_key_scoreboard}" ] && error "missing 'Scoreboard' from apache server: ${*}" && return 1 - if [ ! -z "${apache_key_connstotal}" ] && \ - [ ! 
-z "${apache_key_connsasyncwriting}" ] && \ - [ ! -z "${apache_key_connsasynckeepalive}" ] && \ - [ ! -z "${apache_key_connsasyncclosing}" ] - then + if [ ! -z "${apache_key_connstotal}" ] && + [ ! -z "${apache_key_connsasyncwriting}" ] && + [ ! -z "${apache_key_connsasynckeepalive}" ] && + [ ! -z "${apache_key_connsasyncclosing}" ]; then apache_has_conns=1 else apache_has_conns=0 @@ -103,15 +101,13 @@ apache_get() { ret=$? IFS="${oIFS}" - if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ] - then + if [ $ret -ne 0 ] || [ "${#apache_response[@]}" -eq 0 ]; then return 1 fi # the last line on the apache output is "Scoreboard" # we use this label to detect that the output has a new word count - if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ] - then + if [ ${apache_keys_detected} -eq 0 ] || [ "${apache_response[${apache_key_scoreboard}]}" != "Scoreboard" ]; then apache_detect "${apache_response[@]}" || return 1 apache_keys_detected=1 fi @@ -131,20 +127,20 @@ apache_get() { apache_busyworkers="${apache_response[${apache_key_busyworkers}]}" apache_idleworkers="${apache_response[${apache_key_idleworkers}]}" - if [ -z "${apache_accesses}" ] || \ - [ -z "${apache_kbytes}" ] || \ - [ -z "${apache_reqpersec}" ] || \ - [ -z "${apache_bytespersec}" ] || \ - [ -z "${apache_bytesperreq}" ] || \ - [ -z "${apache_busyworkers}" ] - [ -z "${apache_idleworkers}" ] + if + [ -z "${apache_accesses}" ] || + [ -z "${apache_kbytes}" ] || + [ -z "${apache_reqpersec}" ] || + [ -z "${apache_bytespersec}" ] || + [ -z "${apache_bytesperreq}" ] || + [ -z "${apache_busyworkers}" ] + [ -z "${apache_idleworkers}" ] then error "empty values got from apache server: ${apache_response[*]}" return 1 fi - if [ ${apache_has_conns} -eq 1 ] - then + if [ ${apache_has_conns} -eq 1 ]; then apache_connstotal="${apache_response[${apache_key_connstotal}]}" apache_connsasyncwriting="${apache_response[${apache_key_connsasyncwriting}]}" apache_connsasynckeepalive="${apache_response[${apache_key_connsasynckeepalive}]}" @@ -159,8 +155,7 @@ apache_check() { apache_get # shellcheck disable=2181 - if [ $? -ne 0 ] - then + if [ $? -ne 0 ]; then # shellcheck disable=2154 error "cannot find stub_status on URL '${apache_url}'. Please set apache_url='http://apache.server:80/server-status?auto' in $confd/apache.conf" return 1 @@ -191,8 +186,7 @@ CHART apache_local.net '' "apache Bandwidth" "kilobits/s" bandwidth apache.net a DIMENSION sent '' incremental 8 1 EOF - if [ ${apache_has_conns} -eq 1 ] - then + if [ ${apache_has_conns} -eq 1 ]; then cat </dev/null - # shellcheck disable=2181 - if [ $? -ne 0 ] - then - error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." - failed=$((failed + 1)) - elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ] - then - error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online." - failed=$((failed + 1)) - else - working=$((working + 1)) - fi - done - - if [ ${working} -eq 0 ] - then - error "No APC UPSes found available." - return 1 - fi + local host working=0 failed=0 + for host in "${!apcupsd_sources[@]}"; do + run apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null + # shellcheck disable=2181 + if [ $? -ne 0 ]; then + error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}." 
+ failed=$((failed + 1)) + elif [ "$(apcupsd_get "${apcupsd_sources[${host}]}" | awk '/^STATUS.*/{ print $3 }')" != "ONLINE" ]; then + error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online." + failed=$((failed + 1)) + else + working=$((working + 1)) + fi + done + + if [ ${working} -eq 0 ]; then + error "No APC UPSes found available." + return 1 + fi return 0 } apcupsd_create() { - local host src - for host in "${!apcupsd_sources[@]}" - do - src=${apcupsd_sources[${host}]} + local host src + for host in "${!apcupsd_sources[@]}"; do + src=${apcupsd_sources[${host}]} - # create the charts - cat </dev/null 2>&1 -if [ $? -ne 0 ] -then - # they differ, we cannot do the check - echo >&2 "$me: cannot check with diff." - can_diff=0 +if [ $? -ne 0 ]; then + # they differ, we cannot do the check + echo >&2 "$me: cannot check with diff." + can_diff=0 fi # do it again, now including the script myset >"$tmp1" # include the plugin and its config -if [ -f "$conf" ] -then - # shellcheck source=/dev/null - . "$conf" - if [ $? -ne 0 ] - then - echo >&2 "$me: cannot load config file $conf" - rm "$tmp1" "$tmp2" - exit 1 - fi +if [ -f "$conf" ]; then + # shellcheck source=/dev/null + . "$conf" + if [ $? -ne 0 ]; then + echo >&2 "$me: cannot load config file $conf" + rm "$tmp1" "$tmp2" + exit 1 + fi fi # shellcheck source=/dev/null . "$chart" -if [ $? -ne 0 ] -then - echo >&2 "$me: cannot load chart file $chart" - rm "$tmp1" "$tmp2" - exit 1 +if [ $? -ne 0 ]; then + echo >&2 "$me: cannot load chart file $chart" + rm "$tmp1" "$tmp2" + exit 1 fi # remove all variables starting with the plugin name myset | grep -v "^$name" >"$tmp2" -if [ $can_diff -eq 1 ] -then - # check if they are different - # make sure they don't differ - diff "$tmp1" "$tmp2" >&2 - if [ $? -ne 0 ] - then - # they differ - rm "$tmp1" "$tmp2" - exit 1 - fi +if [ $can_diff -eq 1 ]; then + # check if they are different + # make sure they don't differ + diff "$tmp1" "$tmp2" >&2 + if [ $? -ne 0 ]; then + # they differ + rm "$tmp1" "$tmp2" + exit 1 + fi fi rm "$tmp1" "$tmp2" diff --git a/collectors/charts.d.plugin/charts.d.plugin b/collectors/charts.d.plugin/charts.d.plugin deleted file mode 100644 index 1c6e8c5c9..000000000 --- a/collectors/charts.d.plugin/charts.d.plugin +++ /dev/null @@ -1,743 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis -# GPL v3+ -# -# charts.d.plugin allows easy development of BASH plugins -# -# if you need to run parallel charts.d processes, link this file to a different name -# in the same directory, with a .plugin suffix and netdata will start both of them, -# each will have a different config file and modules configuration directory. -# - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" - -PROGRAM_FILE="$0" -PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin}" -MODULE_NAME="main" - -# ----------------------------------------------------------------------------- -# create temp dir - -debug=0 -TMP_DIR= -chartsd_cleanup() { - trap '' EXIT QUIT HUP INT TERM - - if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ] - then - [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." 
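chartsd_cleanup() here (and in the charts.d.plugin.in template that replaces this generated file later in the patch) is a compact example of robust temp-dir hygiene: one handler registered for every exit path, made non-reentrant by clearing the traps first, and guarded so it never removes anything when the variable is empty. Distilled to its essentials (a sketch using the same commands as the plugin):

    TMP_DIR="$(mktemp -d /tmp/.netdata-example-XXXXXXXXXX)"

    cleanup() {
        trap '' EXIT QUIT HUP INT TERM       # do not re-enter on exit
        [ -n "${TMP_DIR}" ] && [ -d "${TMP_DIR}" ] && rm -rf "${TMP_DIR}"
        exit 0
    }
    trap cleanup EXIT QUIT HUP INT TERM
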
- rm -rf "$TMP_DIR" - fi - exit 0 -} -trap chartsd_cleanup EXIT QUIT HUP INT TERM - -if [ $UID = "0" ] -then - TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )" -else - TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )" -fi - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 -} - -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- -# check a few commands - -require_cmd() { - local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) - if [ -z "${x}" -o ! -x "${x}" ] - then - warning "command '${1}' is not found in ${PATH}." - eval "${1^^}_CMD=\"\"" - return 1 - fi - - eval "${1^^}_CMD=\"${x}\"" - return 0 -} - -require_cmd date || exit 1 -require_cmd sed || exit 1 -require_cmd basename || exit 1 -require_cmd dirname || exit 1 -require_cmd cat || exit 1 -require_cmd grep || exit 1 -require_cmd egrep || exit 1 -require_cmd mktemp || exit 1 -require_cmd awk || exit 1 -require_cmd timeout || exit 1 -require_cmd curl || exit 1 - -# ----------------------------------------------------------------------------- - -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." - -info "started from '$PROGRAM_FILE' with options: $*" - -# ----------------------------------------------------------------------------- -# internal defaults -# netdata exposes a few environment variables for us - -[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" - -pluginsd="${NETDATA_PLUGINS_DIR}" -stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}" -userconfd="${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}" -olduserconfd="${NETDATA_USER_CONFIG_DIR}" -chartsd="$pluginsd/../charts.d" - -minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}" -update_every=${minimum_update_frequency} # this will be overwritten by the command line - -# work around for non BASH shells -charts_create="_create" -charts_update="_update" -charts_check="_check" -charts_undescore="_" - -# when making iterations, charts.d can loop more frequently -# to prevent plugins missing iterations. -# this is a percentage relative to update_every to align its -# iterations. -# The minimum is 10%, the maximum 100%. -# So, if update_every is 1 second and time_divisor is 50, -# charts.d will iterate every 500ms. -# Charts will be called to collect data only if the time -# passed since the last time the collected data is equal or -# above their update_every. -time_divisor=50 - -# number of seconds to run without restart -# after this time, charts.d.plugin will exit -# netdata will restart it -restart_timeout=$((3600 * 4)) - -# check if the charts.d plugins are using global variables -# they should not. 
-# It does not currently support BASH v4 arrays, so it is -# disabled -dryrunner=0 - -# check for timeout command -check_for_timeout=1 - -# the default enable/disable value for all charts -enable_all_charts="yes" - -# ----------------------------------------------------------------------------- -# parse parameters - -check=0 -chart_only= -while [ ! -z "$1" ] -do - if [ "$1" = "check" ] - then - check=1 - shift - continue - fi - - if [ "$1" = "debug" -o "$1" = "all" ] - then - debug=1 - shift - continue - fi - - if [ -f "$chartsd/$1.chart.sh" ] - then - debug=1 - chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - if [ -f "$chartsd/$1" ] - then - debug=1 - chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - # number check - n="$1" - x=$(( n )) - if [ "$x" = "$n" ] - then - shift - update_every=$x - [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency - continue - fi - - fatal "Cannot understand parameter $1. Aborting." -done - - -# ----------------------------------------------------------------------------- -# loop control - -# default sleep function -LOOPSLEEPMS_HIGHRES=0 -now_ms= -current_time_ms_default() { - now_ms="$(date +'%s')000" -} -current_time_ms="current_time_ms_default" -current_time_ms_accuracy=1 -mysleep="sleep" - -# if found and included, this file overwrites loopsleepms() -# and current_time_ms() with a high resolution timer function -# for precise looping. -source "$pluginsd/loopsleepms.sh.inc" -[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'." - -# ----------------------------------------------------------------------------- -# load my configuration - -for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf" -do - if [ -f "$myconfig" ] - then - source "$myconfig" - if [ $? -ne 0 ] - then - error "Config file '$myconfig' loaded with errors." - else - info "Configuration file '$myconfig' loaded." - fi - else - warning "Configuration file '$myconfig' not found." - fi -done - -# make sure time_divisor is right -time_divisor=$((time_divisor)) -[ $time_divisor -lt 10 ] && time_divisor=10 -[ $time_divisor -gt 100 ] && time_divisor=100 - - -# we check for the timeout command, after we load our -# configuration, so that the user may overwrite the -# timeout command we use, providing a function that -# can emulate the timeout command we need: -# > timeout SECONDS command ... -if [ $check_for_timeout -eq 1 ] - then - require_cmd timeout || exit 1 -fi - -# ----------------------------------------------------------------------------- -# internal checks - -# netdata passes the requested update frequency as the first argument -update_every=$(( update_every + 1 - 1)) # makes sure it is a number -test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 - -# check the charts.d directory -[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'" - -# ----------------------------------------------------------------------------- -# library functions - -fixid() { - echo "$*" |\ - tr -c "[A-Z][a-z][0-9]" "_" |\ - sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\ - tr "[A-Z]" "[a-z]" -} - -run() { - local ret pid="${BASHPID}" t - - if [ "z${1}" = "z-t" -a "${2}" != "0" ] - then - t="${2}" - shift 2 - timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? - else - "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? 
- fi - - if [ ${ret} -ne 0 ] - then - { - printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" - printf "%q " "${@}" - printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n" - cat "${TMP_DIR}/run.${pid}" - printf " --- END TRACE ---\n" - } >&2 - fi - rm "${TMP_DIR}/run.${pid}" - - return ${ret} -} - -# convert any floating point number -# to integer, give a multiplier -# the result is stored in ${FLOAT2INT_RESULT} -# so that no fork is necessary -# the multiplier must be a power of 10 -float2int() { - local f m="$2" a b l v=($1) - f=${v[0]} - - # the length of the multiplier - 1 - l=$(( ${#m} - 1 )) - - # check if the number is in scientific notation - if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]] - then - # convert it to decimal - # unfortunately, this fork cannot be avoided - # if you know of a way to avoid it, please let me know - f=$(printf "%0.${l}f" ${f}) - fi - - # split the floating point number - # in integer (a) and decimal (b) - a=${f/.*/} - b=${f/*./} - - # if the integer part is missing - # set it to zero - [ -z "${a}" ] && a="0" - - # strip leading zeros from the integer part - # base 10 convertion - a=$((10#$a)) - - # check the length of the decimal part - # against the length of the multiplier - if [ ${#b} -gt ${l} ] - then - # too many digits - take the most significant - b=${b:0:${l}} - - elif [ ${#b} -lt ${l} ] - then - # too few digits - pad with zero on the right - local z="00000000000000000000000" r=$((l - ${#b})) - b="${b}${z:0:${r}}" - fi - - # strip leading zeros from the decimal part - # base 10 convertion - b=$((10#$b)) - - # store the result - FLOAT2INT_RESULT=$(( (a * m) + b )) -} - - -# ----------------------------------------------------------------------------- -# charts check functions - -all_charts() { - cd "$chartsd" - [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1 - - ls *.chart.sh | sed "s/\.chart\.sh$//g" -} - -declare -A charts_enable_keyword=( - ['apache']="force" - ['cpu_apps']="force" - ['cpufreq']="force" - ['example']="force" - ['exim']="force" - ['hddtemp']="force" - ['load_average']="force" - ['mem_apps']="force" - ['mysql']="force" - ['nginx']="force" - ['phpfpm']="force" - ['postfix']="force" - ['sensors']="force" - ['squid']="force" - ['tomcat']="force" - ) - -all_enabled_charts() { - local charts= enabled= required= - - # find all enabled charts - - for chart in $( all_charts ) - do - MODULE_NAME="${chart}" - - eval "enabled=\$$chart" - if [ -z "${enabled}" ] - then - enabled="${enable_all_charts}" - fi - - required="${charts_enable_keyword[${chart}]}" - [ -z "${required}" ] && required="yes" - - if [ ! "${enabled}" = "${required}" ] - then - info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)." - else - debug "is enabled for auto-detection." - local charts="$charts $chart" - fi - done - MODULE_NAME="main" - - local charts2= - for chart in $charts - do - MODULE_NAME="${chart}" - - # check the enabled charts - local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )" - if [ -z "$check" ] - then - error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." - continue - fi - - local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )" - if [ -z "$create" ] - then - error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." 
- continue - fi - - local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )" - if [ -z "$update" ] - then - error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." - continue - fi - - # check its config - #if [ -f "$userconfd/$chart.conf" ] - #then - # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] - # then - # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." - # continue - # fi - #fi - - #if [ $dryrunner -eq 1 ] - # then - # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null - # if [ $? -ne 0 ] - # then - # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." - # continue - # fi - #fi - - local charts2="$charts2 $chart" - done - MODULE_NAME="main" - - echo $charts2 - debug "enabled charts: $charts2" -} - -# ----------------------------------------------------------------------------- -# load the charts - -suffix_retries="_retries" -suffix_update_every="_update_every" -active_charts= -for chart in $( all_enabled_charts ) -do - MODULE_NAME="${chart}" - - debug "loading module: '$chartsd/$chart.chart.sh'" - - source "$chartsd/$chart.chart.sh" - [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors." - - # first load the stock config - if [ -f "$stockconfd/$chart.conf" ] - then - debug "loading module configuration: '$stockconfd/$chart.conf'" - source "$stockconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors." - else - debug "not found module configuration: '$stockconfd/$chart.conf'" - fi - - # then load the user config (it overwrites the stock) - if [ -f "$userconfd/$chart.conf" ] - then - debug "loading module configuration: '$userconfd/$chart.conf'" - source "$userconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors." - else - debug "not found module configuration: '$userconfd/$chart.conf'" - - if [ -f "$olduserconfd/$chart.conf" ] - then - # support for very old netdata that had the charts.d module configs in /etc/netdata - info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'" - source "$olduserconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors." - fi - fi - - eval "dt=\$$chart$suffix_update_every" - dt=$(( dt + 1 - 1 )) # make sure it is a number - if [ $dt -lt $update_every ] - then - eval "$chart$suffix_update_every=$update_every" - fi - - $chart$charts_check - if [ $? -eq 0 ] - then - debug "module '$chart' activated" - active_charts="$active_charts $chart" - else - error "module's '$chart' check() function reports failure." - fi -done -MODULE_NAME="main" -debug "activated modules: $active_charts" - - -# ----------------------------------------------------------------------------- -# check overwrites - -# enable work time reporting -debug_time= -test $debug -eq 1 && debug_time=tellwork - -# if we only need a specific chart, remove all the others -if [ ! 
-z "${chart_only}" ] -then - debug "requested to run only for: '${chart_only}'" - check_charts= - for chart in $active_charts - do - if [ "$chart" = "$chart_only" ] - then - check_charts="$chart" - break - fi - done - active_charts="$check_charts" -fi -debug "activated charts: $active_charts" - -# stop if we just need a pre-check -if [ $check -eq 1 ] -then - info "CHECK RESULT" - info "Will run the charts: $active_charts" - exit 0 -fi - -# ----------------------------------------------------------------------------- - -cd "${TMP_DIR}" || exit 1 - -# ----------------------------------------------------------------------------- -# create charts - -run_charts= -for chart in $active_charts -do - MODULE_NAME="${chart}" - - debug "calling '$chart$charts_create()'..." - $chart$charts_create - if [ $? -eq 0 ] - then - run_charts="$run_charts $chart" - debug "'$chart' initialized." - else - error "module's '$chart' function '$chart$charts_create()' reports failure." - fi -done -MODULE_NAME="main" -debug "run_charts='$run_charts'" - - -# ----------------------------------------------------------------------------- -# update dimensions - -[ -z "$run_charts" ] && fatal "No charts to collect data from." - -declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=() -global_update() { - local exit_at \ - c=0 dt ret last_ms exec_start_ms exec_end_ms \ - chart now_charts=() next_charts=($run_charts) \ - next_ms x seconds millis - - # return the current time in ms in $now_ms - ${current_time_ms} - - exit_at=$(( now_ms + (restart_timeout * 1000) )) - - for chart in $run_charts - do - eval "charts_update_every[$chart]=\$$chart$suffix_update_every" - test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every - - eval "charts_retries[$chart]=\$$chart$suffix_retries" - test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 - - charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) )) - charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) )) - charts_run_counter[$chart]=0 - charts_serial_failures[$chart]=0 - - echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" - echo "DIMENSION run_time 'run time' absolute 1 1" - done - - # the main loop - while [ "${#next_charts[@]}" -gt 0 ] - do - c=$((c + 1)) - now_charts=("${next_charts[@]}") - next_charts=() - - # return the current time in ms in $now_ms - ${current_time_ms} - - for chart in "${now_charts[@]}" - do - MODULE_NAME="${chart}" - - if [ ${now_ms} -ge ${charts_next_update[$chart]} ] - then - last_ms=${charts_last_update[$chart]} - dt=$(( (now_ms - last_ms) )) - - charts_last_update[$chart]=${now_ms} - - while [ ${charts_next_update[$chart]} -lt ${now_ms} ] - do - charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) )) - done - - # the first call should not give a duration - # so that netdata calibrates to current time - dt=$(( dt * 1000 )) - charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 )) - if [ ${charts_run_counter[$chart]} -eq 1 ] - then - dt= - fi - - exec_start_ms=$now_ms - $chart$charts_update $dt - ret=$? 
- - # return the current time in ms in $now_ms - ${current_time_ms}; exec_end_ms=$now_ms - - echo "BEGIN netdata.plugin_chartsd_$chart $dt" - echo "SET run_time = $(( exec_end_ms - exec_start_ms ))" - echo "END" - - if [ $ret -eq 0 ] - then - charts_serial_failures[$chart]=0 - next_charts+=($chart) - else - charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 )) - - if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ] - then - error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." - else - error "module's '$chart' update() function reports failure. Will keep trying for a while." - next_charts+=($chart) - fi - fi - else - next_charts+=($chart) - fi - done - MODULE_NAME="${chart}" - - # wait the time you are required to - next_ms=$((now_ms + (update_every * 1000 * 100) )) - for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done - next_ms=$((next_ms - now_ms)) - - if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ] - then - next_ms=$(( next_ms + current_time_ms_accuracy )) - seconds=$(( next_ms / 1000 )) - millis=$(( next_ms % 1000 )) - if [ ${millis} -lt 10 ] - then - millis="00${millis}" - elif [ ${millis} -lt 100 ] - then - millis="0${millis}" - fi - - debug "sleeping for ${seconds}.${millis} seconds." - ${mysleep} ${seconds}.${millis} - else - debug "sleeping for ${update_every} seconds." - ${mysleep} $update_every - fi - - test ${now_ms} -ge ${exit_at} && exit 0 - done - - fatal "nothing left to do, exiting..." -} - -global_update diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in index 3477894d8..05a63875b 100755 --- a/collectors/charts.d.plugin/charts.d.plugin.in +++ b/collectors/charts.d.plugin/charts.d.plugin.in @@ -17,7 +17,7 @@ export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" PROGRAM_FILE="$0" PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin}" +PROGRAM_NAME="${PROGRAM_NAME/.plugin/}" MODULE_NAME="main" # ----------------------------------------------------------------------------- @@ -26,72 +26,69 @@ MODULE_NAME="main" debug=0 TMP_DIR= chartsd_cleanup() { - trap '' EXIT QUIT HUP INT TERM - - if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ] - then - [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." - rm -rf "$TMP_DIR" - fi - exit 0 + trap '' EXIT QUIT HUP INT TERM + + if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then + [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..." 
+ rm -rf "$TMP_DIR" + fi + exit 0 } trap chartsd_cleanup EXIT QUIT HUP INT TERM -if [ $UID = "0" ] -then - TMP_DIR="$( mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX )" +if [ $UID = "0" ]; then + TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)" else - TMP_DIR="$( mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX )" + TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)" fi logdate() { - date "+%Y-%m-%d %H:%M:%S" + date "+%Y-%m-%d %H:%M:%S" } log() { - local status="${1}" - shift + local status="${1}" + shift - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: ${*}" } warning() { - log WARNING "${@}" + log WARNING "${@}" } error() { - log ERROR "${@}" + log ERROR "${@}" } info() { - log INFO "${@}" + log INFO "${@}" } fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 + log FATAL "${@}" + echo "DISABLE" + exit 1 } debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" + [ $debug -eq 1 ] && log DEBUG "${@}" } # ----------------------------------------------------------------------------- # check a few commands require_cmd() { - local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) - if [ -z "${x}" -o ! -x "${x}" ] - then - warning "command '${1}' is not found in ${PATH}." - eval "${1^^}_CMD=\"\"" - return 1 - fi - - eval "${1^^}_CMD=\"${x}\"" - return 0 + local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null) + if [ -z "${x}" -o ! -x "${x}" ]; then + warning "command '${1}' is not found in ${PATH}." + eval "${1^^}_CMD=\"\"" + return 1 + fi + + eval "${1^^}_CMD=\"${x}\"" + return 0 } require_cmd date || exit 1 @@ -108,7 +105,7 @@ require_cmd curl || exit 1 # ----------------------------------------------------------------------------- -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." +[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade." info "started from '$PROGRAM_FILE' with options: $*" @@ -117,7 +114,7 @@ info "started from '$PROGRAM_FILE' with options: $*" # netdata exposes a few environment variables for us [ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" [ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" pluginsd="${NETDATA_PLUGINS_DIR}" @@ -127,7 +124,7 @@ olduserconfd="${NETDATA_USER_CONFIG_DIR}" chartsd="$pluginsd/../charts.d" minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}" -update_every=${minimum_update_frequency} # this will be overwritten by the command line +update_every=${minimum_update_frequency} # this will be overwritten by the command line # work around for non BASH shells charts_create="_create" @@ -169,53 +166,46 @@ enable_all_charts="yes" check=0 chart_only= -while [ ! 
-z "$1" ] -do - if [ "$1" = "check" ] - then - check=1 - shift - continue - fi - - if [ "$1" = "debug" -o "$1" = "all" ] - then - debug=1 - shift - continue - fi - - if [ -f "$chartsd/$1.chart.sh" ] - then - debug=1 - chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - if [ -f "$chartsd/$1" ] - then - debug=1 - chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )" - shift - continue - fi - - # number check - n="$1" - x=$(( n )) - if [ "$x" = "$n" ] - then - shift - update_every=$x - [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency - continue - fi - - fatal "Cannot understand parameter $1. Aborting." +while [ ! -z "$1" ]; do + if [ "$1" = "check" ]; then + check=1 + shift + continue + fi + + if [ "$1" = "debug" -o "$1" = "all" ]; then + debug=1 + shift + continue + fi + + if [ -f "$chartsd/$1.chart.sh" ]; then + debug=1 + chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")" + shift + continue + fi + + if [ -f "$chartsd/$1" ]; then + debug=1 + chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")" + shift + continue + fi + + # number check + n="$1" + x=$((n)) + if [ "$x" = "$n" ]; then + shift + update_every=$x + [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency + continue + fi + + fatal "Cannot understand parameter $1. Aborting." done - # ----------------------------------------------------------------------------- # loop control @@ -223,7 +213,7 @@ done LOOPSLEEPMS_HIGHRES=0 now_ms= current_time_ms_default() { - now_ms="$(date +'%s')000" + now_ms="$(date +'%s')000" } current_time_ms="current_time_ms_default" current_time_ms_accuracy=1 @@ -238,20 +228,17 @@ source "$pluginsd/loopsleepms.sh.inc" # ----------------------------------------------------------------------------- # load my configuration -for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf" -do - if [ -f "$myconfig" ] - then - source "$myconfig" - if [ $? -ne 0 ] - then - error "Config file '$myconfig' loaded with errors." - else - info "Configuration file '$myconfig' loaded." - fi - else - warning "Configuration file '$myconfig' not found." - fi +for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf"; do + if [ -f "$myconfig" ]; then + source "$myconfig" + if [ $? -ne 0 ]; then + error "Config file '$myconfig' loaded with errors." + else + info "Configuration file '$myconfig' loaded." + fi + else + warning "Configuration file '$myconfig' not found." + fi done # make sure time_divisor is right @@ -259,22 +246,20 @@ time_divisor=$((time_divisor)) [ $time_divisor -lt 10 ] && time_divisor=10 [ $time_divisor -gt 100 ] && time_divisor=100 - # we check for the timeout command, after we load our # configuration, so that the user may overwrite the # timeout command we use, providing a function that # can emulate the timeout command we need: # > timeout SECONDS command ... 
-if [ $check_for_timeout -eq 1 ] - then - require_cmd timeout || exit 1 +if [ $check_for_timeout -eq 1 ]; then + require_cmd timeout || exit 1 fi # ----------------------------------------------------------------------------- # internal checks # netdata passes the requested update frequency as the first argument -update_every=$(( update_every + 1 - 1)) # makes sure it is a number +update_every=$((update_every + 1 - 1)) # makes sure it is a number test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 # check the charts.d directory @@ -284,39 +269,37 @@ test $update_every -eq 0 && update_every=1 # if it is zero, make it 1 # library functions fixid() { - echo "$*" |\ - tr -c "[A-Z][a-z][0-9]" "_" |\ - sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |\ - tr "[A-Z]" "[a-z]" + echo "$*" | + tr -c "[A-Z][a-z][0-9]" "_" | + sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" | + tr "[A-Z]" "[a-z]" } run() { - local ret pid="${BASHPID}" t - - if [ "z${1}" = "z-t" -a "${2}" != "0" ] - then - t="${2}" - shift 2 - timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? - else - "${@}" 2>"${TMP_DIR}/run.${pid}" - ret=$? - fi - - if [ ${ret} -ne 0 ] - then - { - printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" - printf "%q " "${@}" - printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n" - cat "${TMP_DIR}/run.${pid}" - printf " --- END TRACE ---\n" - } >&2 - fi - rm "${TMP_DIR}/run.${pid}" - - return ${ret} + local ret pid="${BASHPID}" t + + if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then + t="${2}" + shift 2 + timeout ${t} "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? + else + "${@}" 2>"${TMP_DIR}/run.${pid}" + ret=$? + fi + + if [ ${ret} -ne 0 ]; then + { + printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '" + printf "%q " "${@}" + printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n" + cat "${TMP_DIR}/run.${pid}" + printf " --- END TRACE ---\n" + } >&2 + fi + rm "${TMP_DIR}/run.${pid}" + + return ${ret} } # convert any floating point number @@ -325,166 +308,155 @@ run() { # so that no fork is necessary # the multiplier must be a power of 10 float2int() { - local f m="$2" a b l v=($1) - f=${v[0]} - - # the length of the multiplier - 1 - l=$(( ${#m} - 1 )) - - # check if the number is in scientific notation - if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]] - then - # convert it to decimal - # unfortunately, this fork cannot be avoided - # if you know of a way to avoid it, please let me know - f=$(printf "%0.${l}f" ${f}) - fi - - # split the floating point number - # in integer (a) and decimal (b) - a=${f/.*/} - b=${f/*./} - - # if the integer part is missing - # set it to zero - [ -z "${a}" ] && a="0" - - # strip leading zeros from the integer part - # base 10 convertion - a=$((10#$a)) - - # check the length of the decimal part - # against the length of the multiplier - if [ ${#b} -gt ${l} ] - then - # too many digits - take the most significant - b=${b:0:${l}} - - elif [ ${#b} -lt ${l} ] - then - # too few digits - pad with zero on the right - local z="00000000000000000000000" r=$((l - ${#b})) - b="${b}${z:0:${r}}" - fi - - # strip leading zeros from the decimal part - # base 10 convertion - b=$((10#$b)) - - # store the result - FLOAT2INT_RESULT=$(( (a * m) + b )) + local f m="$2" a b l v=($1) + f=${v[0]} + + # the length of the multiplier - 1 + l=$((${#m} - 1)) + + # check if the number is in scientific notation + if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then + # convert it to decimal + # unfortunately, 
this fork cannot be avoided + # if you know of a way to avoid it, please let me know + f=$(printf "%0.${l}f" ${f}) + fi + + # split the floating point number + # in integer (a) and decimal (b) + a=${f/.*/} + b=${f/*./} + + # if the integer part is missing + # set it to zero + [ -z "${a}" ] && a="0" + + # strip leading zeros from the integer part + # base 10 convertion + a=$((10#$a)) + + # check the length of the decimal part + # against the length of the multiplier + if [ ${#b} -gt ${l} ]; then + # too many digits - take the most significant + b=${b:0:l} + + elif [ ${#b} -lt ${l} ]; then + # too few digits - pad with zero on the right + local z="00000000000000000000000" r=$((l - ${#b})) + b="${b}${z:0:r}" + fi + + # strip leading zeros from the decimal part + # base 10 convertion + b=$((10#$b)) + + # store the result + FLOAT2INT_RESULT=$(((a * m) + b)) } - # ----------------------------------------------------------------------------- # charts check functions all_charts() { - cd "$chartsd" - [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1 + cd "$chartsd" + [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1 - ls *.chart.sh | sed "s/\.chart\.sh$//g" + ls *.chart.sh | sed "s/\.chart\.sh$//g" } declare -A charts_enable_keyword=( - ['apache']="force" - ['cpu_apps']="force" - ['cpufreq']="force" - ['example']="force" - ['exim']="force" - ['hddtemp']="force" - ['load_average']="force" - ['mem_apps']="force" - ['mysql']="force" - ['nginx']="force" - ['phpfpm']="force" - ['postfix']="force" - ['sensors']="force" - ['squid']="force" - ['tomcat']="force" - ) + ['apache']="force" + ['cpu_apps']="force" + ['cpufreq']="force" + ['example']="force" + ['exim']="force" + ['hddtemp']="force" + ['load_average']="force" + ['mem_apps']="force" + ['mysql']="force" + ['nginx']="force" + ['phpfpm']="force" + ['postfix']="force" + ['sensors']="force" + ['squid']="force" + ['tomcat']="force" +) all_enabled_charts() { - local charts= enabled= required= - - # find all enabled charts - - for chart in $( all_charts ) - do - MODULE_NAME="${chart}" - - eval "enabled=\$$chart" - if [ -z "${enabled}" ] - then - enabled="${enable_all_charts}" - fi - - required="${charts_enable_keyword[${chart}]}" - [ -z "${required}" ] && required="yes" - - if [ ! "${enabled}" = "${required}" ] - then - info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)." - else - debug "is enabled for auto-detection." - local charts="$charts $chart" - fi - done - MODULE_NAME="main" - - local charts2= - for chart in $charts - do - MODULE_NAME="${chart}" - - # check the enabled charts - local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )" - if [ -z "$check" ] - then - error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." - continue - fi - - local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )" - if [ -z "$create" ] - then - error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." - continue - fi - - local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )" - if [ -z "$update" ] - then - error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." - continue - fi - - # check its config - #if [ -f "$userconfd/$chart.conf" ] - #then - # if [ ! 
-z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] - # then - # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." - # continue - # fi - #fi - - #if [ $dryrunner -eq 1 ] - # then - # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null - # if [ $? -ne 0 ] - # then - # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." - # continue - # fi - #fi - - local charts2="$charts2 $chart" - done - MODULE_NAME="main" - - echo $charts2 - debug "enabled charts: $charts2" + local charts= enabled= required= + + # find all enabled charts + + for chart in $(all_charts); do + MODULE_NAME="${chart}" + + eval "enabled=\$$chart" + if [ -z "${enabled}" ]; then + enabled="${enable_all_charts}" + fi + + required="${charts_enable_keyword[${chart}]}" + [ -z "${required}" ] && required="yes" + + if [ ! "${enabled}" = "${required}" ]; then + info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)." + else + debug "is enabled for auto-detection." + local charts="$charts $chart" + fi + done + MODULE_NAME="main" + + local charts2= + for chart in $charts; do + MODULE_NAME="${chart}" + + # check the enabled charts + local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")" + if [ -z "$check" ]; then + error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it." + continue + fi + + local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")" + if [ -z "$create" ]; then + error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it." + continue + fi + + local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")" + if [ -z "$update" ]; then + error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it." + continue + fi + + # check its config + #if [ -f "$userconfd/$chart.conf" ] + #then + # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ] + # then + # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it." + # continue + # fi + #fi + + #if [ $dryrunner -eq 1 ] + # then + # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null + # if [ $? -ne 0 ] + # then + # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it." + # continue + # fi + #fi + + local charts2="$charts2 $chart" + done + MODULE_NAME="main" + + echo $charts2 + debug "enabled charts: $charts2" } # ----------------------------------------------------------------------------- @@ -493,63 +465,56 @@ all_enabled_charts() { suffix_retries="_retries" suffix_update_every="_update_every" active_charts= -for chart in $( all_enabled_charts ) -do - MODULE_NAME="${chart}" - - debug "loading module: '$chartsd/$chart.chart.sh'" - - source "$chartsd/$chart.chart.sh" - [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors." 
- - # first load the stock config - if [ -f "$stockconfd/$chart.conf" ] - then - debug "loading module configuration: '$stockconfd/$chart.conf'" - source "$stockconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors." - else - debug "not found module configuration: '$stockconfd/$chart.conf'" - fi - - # then load the user config (it overwrites the stock) - if [ -f "$userconfd/$chart.conf" ] - then - debug "loading module configuration: '$userconfd/$chart.conf'" - source "$userconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors." - else - debug "not found module configuration: '$userconfd/$chart.conf'" - - if [ -f "$olduserconfd/$chart.conf" ] - then - # support for very old netdata that had the charts.d module configs in /etc/netdata - info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'" - source "$olduserconfd/$chart.conf" - [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors." - fi - fi - - eval "dt=\$$chart$suffix_update_every" - dt=$(( dt + 1 - 1 )) # make sure it is a number - if [ $dt -lt $update_every ] - then - eval "$chart$suffix_update_every=$update_every" - fi - - $chart$charts_check - if [ $? -eq 0 ] - then - debug "module '$chart' activated" - active_charts="$active_charts $chart" - else - error "module's '$chart' check() function reports failure." - fi +for chart in $(all_enabled_charts); do + MODULE_NAME="${chart}" + + debug "loading module: '$chartsd/$chart.chart.sh'" + + source "$chartsd/$chart.chart.sh" + [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors." + + # first load the stock config + if [ -f "$stockconfd/$chart.conf" ]; then + debug "loading module configuration: '$stockconfd/$chart.conf'" + source "$stockconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$stockconfd/$chart.conf'" + fi + + # then load the user config (it overwrites the stock) + if [ -f "$userconfd/$chart.conf" ]; then + debug "loading module configuration: '$userconfd/$chart.conf'" + source "$userconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors." + else + debug "not found module configuration: '$userconfd/$chart.conf'" + + if [ -f "$olduserconfd/$chart.conf" ]; then + # support for very old netdata that had the charts.d module configs in /etc/netdata + info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'" + source "$olduserconfd/$chart.conf" + [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors." + fi + fi + + eval "dt=\$$chart$suffix_update_every" + dt=$((dt + 1 - 1)) # make sure it is a number + if [ $dt -lt $update_every ]; then + eval "$chart$suffix_update_every=$update_every" + fi + + $chart$charts_check + if [ $? -eq 0 ]; then + debug "module '$chart' activated" + active_charts="$active_charts $chart" + else + error "module's '$chart' check() function reports failure." + fi done MODULE_NAME="main" debug "activated modules: $active_charts" - # ----------------------------------------------------------------------------- # check overwrites @@ -558,28 +523,24 @@ debug_time= test $debug -eq 1 && debug_time=tellwork # if we only need a specific chart, remove all the others -if [ ! 
-z "${chart_only}" ] -then - debug "requested to run only for: '${chart_only}'" - check_charts= - for chart in $active_charts - do - if [ "$chart" = "$chart_only" ] - then - check_charts="$chart" - break - fi - done - active_charts="$check_charts" +if [ ! -z "${chart_only}" ]; then + debug "requested to run only for: '${chart_only}'" + check_charts= + for chart in $active_charts; do + if [ "$chart" = "$chart_only" ]; then + check_charts="$chart" + break + fi + done + active_charts="$check_charts" fi debug "activated charts: $active_charts" # stop if we just need a pre-check -if [ $check -eq 1 ] -then - info "CHECK RESULT" - info "Will run the charts: $active_charts" - exit 0 +if [ $check -eq 1 ]; then + info "CHECK RESULT" + info "Will run the charts: $active_charts" + exit 0 fi # ----------------------------------------------------------------------------- @@ -590,24 +551,21 @@ cd "${TMP_DIR}" || exit 1 # create charts run_charts= -for chart in $active_charts -do - MODULE_NAME="${chart}" - - debug "calling '$chart$charts_create()'..." - $chart$charts_create - if [ $? -eq 0 ] - then - run_charts="$run_charts $chart" - debug "'$chart' initialized." - else - error "module's '$chart' function '$chart$charts_create()' reports failure." - fi +for chart in $active_charts; do + MODULE_NAME="${chart}" + + debug "calling '$chart$charts_create()'..." + $chart$charts_create + if [ $? -eq 0 ]; then + run_charts="$run_charts $chart" + debug "'$chart' initialized." + else + error "module's '$chart' function '$chart$charts_create()' reports failure." + fi done MODULE_NAME="main" debug "run_charts='$run_charts'" - # ----------------------------------------------------------------------------- # update dimensions @@ -615,129 +573,119 @@ debug "run_charts='$run_charts'" declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=() global_update() { - local exit_at \ - c=0 dt ret last_ms exec_start_ms exec_end_ms \ - chart now_charts=() next_charts=($run_charts) \ - next_ms x seconds millis - - # return the current time in ms in $now_ms - ${current_time_ms} - - exit_at=$(( now_ms + (restart_timeout * 1000) )) - - for chart in $run_charts - do - eval "charts_update_every[$chart]=\$$chart$suffix_update_every" - test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every - - eval "charts_retries[$chart]=\$$chart$suffix_retries" - test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 - - charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000) ) )) - charts_next_update[$chart]=$(( charts_last_update[$chart] + (charts_update_every[$chart] * 1000) )) - charts_run_counter[$chart]=0 - charts_serial_failures[$chart]=0 - - echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" - echo "DIMENSION run_time 'run time' absolute 1 1" - done - - # the main loop - while [ "${#next_charts[@]}" -gt 0 ] - do - c=$((c + 1)) - now_charts=("${next_charts[@]}") - next_charts=() - - # return the current time in ms in $now_ms - ${current_time_ms} - - for chart in "${now_charts[@]}" - do - MODULE_NAME="${chart}" - - if [ ${now_ms} -ge ${charts_next_update[$chart]} ] - then - last_ms=${charts_last_update[$chart]} - dt=$(( (now_ms - last_ms) )) - - charts_last_update[$chart]=${now_ms} - - while [ ${charts_next_update[$chart]} -lt ${now_ms} ] - do - 
charts_next_update[$chart]=$(( charts_next_update[$chart] + (charts_update_every[$chart] * 1000) )) - done - - # the first call should not give a duration - # so that netdata calibrates to current time - dt=$(( dt * 1000 )) - charts_run_counter[$chart]=$(( charts_run_counter[$chart] + 1 )) - if [ ${charts_run_counter[$chart]} -eq 1 ] - then - dt= - fi - - exec_start_ms=$now_ms - $chart$charts_update $dt - ret=$? - - # return the current time in ms in $now_ms - ${current_time_ms}; exec_end_ms=$now_ms - - echo "BEGIN netdata.plugin_chartsd_$chart $dt" - echo "SET run_time = $(( exec_end_ms - exec_start_ms ))" - echo "END" - - if [ $ret -eq 0 ] - then - charts_serial_failures[$chart]=0 - next_charts+=($chart) - else - charts_serial_failures[$chart]=$(( charts_serial_failures[$chart] + 1 )) - - if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ] - then - error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." - else - error "module's '$chart' update() function reports failure. Will keep trying for a while." - next_charts+=($chart) - fi - fi - else - next_charts+=($chart) - fi - done - MODULE_NAME="${chart}" - - # wait the time you are required to - next_ms=$((now_ms + (update_every * 1000 * 100) )) - for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done - next_ms=$((next_ms - now_ms)) - - if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ] - then - next_ms=$(( next_ms + current_time_ms_accuracy )) - seconds=$(( next_ms / 1000 )) - millis=$(( next_ms % 1000 )) - if [ ${millis} -lt 10 ] - then - millis="00${millis}" - elif [ ${millis} -lt 100 ] - then - millis="0${millis}" - fi - - debug "sleeping for ${seconds}.${millis} seconds." - ${mysleep} ${seconds}.${millis} - else - debug "sleeping for ${update_every} seconds." - ${mysleep} $update_every - fi - - test ${now_ms} -ge ${exit_at} && exit 0 - done - - fatal "nothing left to do, exiting..." 
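The rewritten body that follows schedules every chart independently: each chart's last and next run are snapped to wall-clock multiples of its own update frequency, so charts with different `update_every` values stay phase-aligned instead of drifting. A standalone sketch of that alignment arithmetic, using illustrative sample values:

```
# snap a chart to wall-clock multiples of its update_every
# (now_ms and update_every_ms are arbitrary sample values)
now_ms=1549612345678                               # current time, in ms
update_every_ms=$((2 * 1000))                      # a chart updating every 2s
last_ms=$((now_ms - (now_ms % update_every_ms)))   # most recent aligned tick
next_ms=$((last_ms + update_every_ms))             # when the chart is next due
echo "last=${last_ms} next=${next_ms}"             # next - last is exactly 2000ms
```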
+ local exit_at \ + c=0 dt ret last_ms exec_start_ms exec_end_ms \ + chart now_charts=() next_charts=($run_charts) \ + next_ms x seconds millis + + # return the current time in ms in $now_ms + ${current_time_ms} + + exit_at=$((now_ms + (restart_timeout * 1000))) + + for chart in $run_charts; do + eval "charts_update_every[$chart]=\$$chart$suffix_update_every" + test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every + + eval "charts_retries[$chart]=\$$chart$suffix_retries" + test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10 + + charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000)))) + charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000))) + charts_run_counter[$chart]=0 + charts_serial_failures[$chart]=0 + + echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" + echo "DIMENSION run_time 'run time' absolute 1 1" + done + + # the main loop + while [ "${#next_charts[@]}" -gt 0 ]; do + c=$((c + 1)) + now_charts=("${next_charts[@]}") + next_charts=() + + # return the current time in ms in $now_ms + ${current_time_ms} + + for chart in "${now_charts[@]}"; do + MODULE_NAME="${chart}" + + if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then + last_ms=${charts_last_update[$chart]} + dt=$((now_ms - last_ms)) + + charts_last_update[$chart]=${now_ms} + + while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do + charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000))) + done + + # the first call should not give a duration + # so that netdata calibrates to current time + dt=$((dt * 1000)) + charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1)) + if [ ${charts_run_counter[$chart]} -eq 1 ]; then + dt= + fi + + exec_start_ms=$now_ms + $chart$charts_update $dt + ret=$? + + # return the current time in ms in $now_ms + ${current_time_ms} + exec_end_ms=$now_ms + + echo "BEGIN netdata.plugin_chartsd_$chart $dt" + echo "SET run_time = $((exec_end_ms - exec_start_ms))" + echo "END" + + if [ $ret -eq 0 ]; then + charts_serial_failures[$chart]=0 + next_charts+=($chart) + else + charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1)) + + if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then + error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it." + else + error "module's '$chart' update() function reports failure. Will keep trying for a while." + next_charts+=($chart) + fi + fi + else + next_charts+=($chart) + fi + done + MODULE_NAME="${chart}" + + # wait the time you are required to + next_ms=$((now_ms + (update_every * 1000 * 100))) + for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done + next_ms=$((next_ms - now_ms)) + + if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then + next_ms=$((next_ms + current_time_ms_accuracy)) + seconds=$((next_ms / 1000)) + millis=$((next_ms % 1000)) + if [ ${millis} -lt 10 ]; then + millis="00${millis}" + elif [ ${millis} -lt 100 ]; then + millis="0${millis}" + fi + + debug "sleeping for ${seconds}.${millis} seconds." + ${mysleep} ${seconds}.${millis} + else + debug "sleeping for ${update_every} seconds." + ${mysleep} $update_every + fi + + test ${now_ms} -ge ${exit_at} && exit 0 + done + + fatal "nothing left to do, exiting..." 
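# note on the zero-padding above: ${millis} is a remainder in 0..999 that is
# interpolated after a decimal point, so it must be padded to three digits --
# otherwise 1007ms would be slept as "1.7" seconds instead of "1.007".
# a sketch with a sample value (next_ms=1007 is illustrative):
#   next_ms=1007
#   seconds=$((next_ms / 1000)); millis=$((next_ms % 1000))
#   [ ${millis} -lt 10 ] && millis="00${millis}"     # "7" -> "007"
#   ${mysleep} ${seconds}.${millis}                  # sleeps 1.007s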
} global_update diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md index cd8adf0a2..a32a6330a 100644 --- a/collectors/charts.d.plugin/cpu_apps/README.md +++ b/collectors/charts.d.plugin/cpu_apps/README.md @@ -1,2 +1,6 @@ +# cpu_apps + > THIS MODULE IS OBSOLETE. -> USE APPS.PLUGIN. +> USE [APPS.PLUGIN](../../apps.plugin). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpu_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh index 869464afe..e91c46d54 100644 --- a/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh +++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh @@ -23,8 +23,7 @@ cpu_apps_check() { # - 0 to enable the chart # - 1 to disable the chart - if [ -z "$cpu_apps_apps" ] - then + if [ -z "$cpu_apps_apps" ]; then error "manual configuration required: please set cpu_apps_apps='command1 command2 ...' in $confd/cpu_apps_apps.conf" return 1 fi @@ -38,8 +37,7 @@ cpu_apps_create() { echo "CHART chartsd_apps.cpu '' 'Apps CPU' 'milliseconds / $cpu_apps_update_every sec' apps apps stacked 20001 $cpu_apps_update_every" local x= - for x in $cpu_apps_apps - do + for x in $cpu_apps_apps; do echo "DIMENSION $x $x incremental 1000 $cpu_apps_clockticks" # this string is needed later in the update() function @@ -55,15 +53,15 @@ cpu_apps_update() { # remember: KEEP IT SIMPLE AND SHORT echo "BEGIN chartsd_apps.cpu" - ps -o pid,comm -C "$cpu_apps_apps" |\ - grep -v "COMMAND" |\ + ps -o pid,comm -C "$cpu_apps_apps" | + grep -v "COMMAND" | ( - while read pid name - do - echo "$name+=`cat /proc/$pid/stat | cut -d ' ' -f 14-15`" + while read pid name; do + echo "$name+=$(cat /proc/$pid/stat | cut -d ' ' -f 14-15)" done - ) |\ - ( sed -e "s/ \+/ /g" -e "s/ /+/g"; + ) | + ( + sed -e "s/ \+/ /g" -e "s/ /+/g" echo "$cpu_apps_bc_finalze" ) | bc echo "END" diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md index d82951aac..84883f583 100644 --- a/collectors/charts.d.plugin/cpufreq/README.md +++ b/collectors/charts.d.plugin/cpufreq/README.md @@ -1,2 +1,6 @@ +# cpufreq + > THIS MODULE IS OBSOLETE. 
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE THE [PROC PLUGIN](../../proc.plugin) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh index 1fc6caabf..68708d911 100644 --- a/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh +++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh @@ -30,7 +30,7 @@ cpufreq_check() { # - 0 to enable the chart # - 1 to disable the chart - [ -z "$( cpufreq_find_all_files "$cpufreq_sys_dir" )" ] && return 1 + [ -z "$(cpufreq_find_all_files "$cpufreq_sys_dir")" ] && return 1 return 0 } @@ -47,16 +47,15 @@ cpufreq_create() { echo >>"$TMP_DIR/cpufreq.sh" "echo \"BEGIN cpu.cpufreq \$1\"" i=0 - for file in $( cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u ) - do - i=$(( i + 1 )) - dir=$( dirname "$file" ) + for file in $(cpufreq_find_all_files "$cpufreq_sys_dir" | sort -u); do + i=$((i + 1)) + dir=$(dirname "$file") cpu= - [ -f "$dir/affected_cpus" ] && cpu=$( cat "$dir/affected_cpus" ) + [ -f "$dir/affected_cpus" ] && cpu=$(cat "$dir/affected_cpus") [ -z "$cpu" ] && cpu="$i.a" - id="$( fixid "cpu$cpu" )" + id="$(fixid "cpu$cpu")" debug "file='$file', dir='$dir', cpu='$cpu', id='$id'" @@ -68,7 +67,7 @@ cpufreq_create() { [ $cpufreq_source_update -eq 1 ] && echo >>"$TMP_DIR/cpufreq.sh" "}" # ok, load the function cpufreq_update() we created - # shellcheck disable=SC1090 + # shellcheck disable=SC1090 [ $cpufreq_source_update -eq 1 ] && . "$TMP_DIR/cpufreq.sh" return 0 @@ -82,9 +81,8 @@ cpufreq_update() { # do all the work to collect / calculate the values # for each dimension # remember: KEEP IT SIMPLE AND SHORT - # shellcheck disable=SC1090 + # shellcheck disable=SC1090 [ $cpufreq_source_update -eq 0 ] && . "$TMP_DIR/cpufreq.sh" "$1" return 0 } - diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md index bfd5e210a..e62f76777 100644 --- a/collectors/charts.d.plugin/example/README.md +++ b/collectors/charts.d.plugin/example/README.md @@ -1,2 +1,6 @@ +# Example + This is just an example charts.d data collector. 
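Every charts.d collector is a plain bash file defining functions named after the module: the wrapper calls `<module>_check()` once to decide whether to enable it, `<module>_create()` once to emit the chart definitions, and `<module>_update()` on every iteration. A minimal skeleton of that contract (the module name `mymodule` and its chart are invented for illustration):

```
# mymodule.chart.sh - a hypothetical minimal charts.d module
mymodule_update_every=1      # seconds between updates
mymodule_priority=150000     # ordering of the chart on the dashboard

mymodule_check() {
  # return 0 to enable the module, 1 to disable it
  return 0
}

mymodule_create() {
  # emit the chart and dimension definitions once
  echo "CHART mymodule.random '' 'A random value' 'value' random random line $mymodule_priority $mymodule_update_every"
  echo "DIMENSION random '' absolute 1 1"
  return 0
}

mymodule_update() {
  # $1 is the microseconds since the last run; pass it to BEGIN
  echo "BEGIN mymodule.random $1"
  echo "SET random = $RANDOM"
  echo "END"
  return 0
}
```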
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh index 1562c597a..8bae570a3 100644 --- a/collectors/charts.d.plugin/example/example.chart.sh +++ b/collectors/charts.d.plugin/example/example.chart.sh @@ -44,23 +44,20 @@ example_get() { example_value1=$RANDOM example_value2=$RANDOM example_value3=$RANDOM - example_value4=$((8192 + (RANDOM * 16383 / 32767) )) + example_value4=$((8192 + (RANDOM * 16383 / 32767))) - if [ $example_count -gt 0 ] - then + if [ $example_count -gt 0 ]; then example_count=$((example_count - 1)) - [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767))) + [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767))) [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767))) else - example_count=$((1 + (RANDOM * 5 / 32767) )) + example_count=$((1 + (RANDOM * 5 / 32767))) - if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ] - then + if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then example_value4=$((example_value4 - 16383)) fi - if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ] - then + if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then example_value4=$((example_value4 + 16383)) fi fi diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md index d82951aac..b4c853895 100644 --- a/collectors/charts.d.plugin/exim/README.md +++ b/collectors/charts.d.plugin/exim/README.md @@ -1,2 +1,6 @@ +# exim + > THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/exim) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/exim/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh index 8099a7249..7b0ef70d2 100644 --- a/collectors/charts.d.plugin/exim/exim.chart.sh +++ b/collectors/charts.d.plugin/exim/exim.chart.sh @@ -17,14 +17,12 @@ exim_update_every=5 exim_priority=60000 exim_check() { - if [ -z "${exim_command}" ] - then - require_cmd exim || return 1 - exim_command="${EXIM_CMD}" - fi - - if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ] - then + if [ -z "${exim_command}" ]; then + require_cmd exim || return 1 + exim_command="${EXIM_CMD}" + fi + + if [ "$(${exim_command} -bpc 2>&1 | grep -c denied)" -ne 0 ]; then error "permission denied - please set 'queue_list_requires_admin = false' in your exim options file" return 1 fi @@ -33,16 +31,16 @@ exim_check() { } exim_create() { - cat < THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT - # hddtemp +> THIS MODULE IS OBSOLETE. 
+> USE [THE PYTHON ONE](../../python.d.plugin/hddtemp) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + The plugin will collect temperatures from disks It will create one chart with all active disks @@ -26,3 +26,5 @@ hddtemp_disks=() ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh index e90310981..a4cef3c3b 100644 --- a/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh +++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh @@ -21,7 +21,7 @@ hddtemp_priority=90000 # _check is called once, to find out if this chart should be enabled or not hddtemp_check() { - require_cmd nc || return 1 + require_cmd nc || return 1 run nc $hddtemp_host $hddtemp_port && return 0 || return 1 } @@ -29,17 +29,17 @@ hddtemp_check() { hddtemp_create() { if [ ${#hddtemp_disks[@]} -eq 0 ]; then local all - all=$(nc $hddtemp_host $hddtemp_port ) + all=$(nc $hddtemp_host $hddtemp_port) unset hddtemp_disks # shellcheck disable=SC2190,SC2207 - hddtemp_disks=( $(grep -Po '/dev/[^|]+' <<< "$all" | cut -c 6-) ) + hddtemp_disks=($(grep -Po '/dev/[^|]+' <<<"$all" | cut -c 6-)) fi -# local disk_names -# disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`) + # local disk_names + # disk_names=(`sed -e 's/||/\n/g;s/^|//' <<< "$all" | cut -d '|' -f2 | tr ' ' '_'`) echo "CHART hddtemp.temperature 'disks_temp' 'temperature' 'Celsius' 'Disks temperature' 'hddtemp.temp' line $((hddtemp_priority)) $hddtemp_update_every" - for i in $(seq 0 $((${#hddtemp_disks[@]}-1))); do -# echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1" + for i in $(seq 0 $((${#hddtemp_disks[@]} - 1))); do + # echo "DIMENSION ${hddtemp_disks[i]} ${disk_names[i]} absolute 1 1" echo "DIMENSION ${hddtemp_disks[$i]} '' absolute 1 1" done return 0 @@ -49,12 +49,12 @@ hddtemp_create() { #hddtemp_last=0 #hddtemp_count=0 hddtemp_update() { -# local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` ) -# local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` ) + # local all=( `nc $hddtemp_host $hddtemp_port | sed -e 's/||/\n/g;s/^|//' | cut -d '|' -f3` ) + # local all=( `nc $hddtemp_host $hddtemp_port | awk 'BEGIN { FS="|" };{i=4; while (i <= NF) {print $i+0;i+=5;};}'` ) OLD_IFS=$IFS set -f # shellcheck disable=SC2207 - IFS="|" all=( $(nc $hddtemp_host $hddtemp_port 2>/dev/null) ) + IFS="|" all=($(nc $hddtemp_host $hddtemp_port 2>/dev/null)) set +f IFS=$OLD_IFS @@ -66,9 +66,9 @@ hddtemp_update() { # write the result of the work. echo "BEGIN hddtemp.temperature $1" end=${#hddtemp_disks[@]} - for ((i=0; i THIS MODULE IS OBSOLETE. 
> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fload_average%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/load_average/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh index b30cb850f..841e3d9f6 100644 --- a/collectors/charts.d.plugin/load_average/load_average.chart.sh +++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh @@ -21,8 +21,7 @@ load_average_check() { # - 0 to enable the chart # - 1 to disable the chart - if [ ${load_average_update_every} -lt 5 ] - then + if [ ${load_average_update_every} -lt 5 ]; then # there is no meaning for shorter than 5 seconds # the kernel changes this value every 5 seconds load_average_update_every=5 @@ -34,7 +33,7 @@ load_average_check() { load_average_create() { # create a chart with 3 dimensions -cat <&2 "$0: ERROR: Cannot find the command 'date' in the system path." - exit 1 +if [ -z "$LOOPSLEEP_DATE" ]; then + echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path." + exit 1 fi # ----------------------------------------------------------------------------- @@ -14,14 +13,13 @@ fi now_ms= LOOPSLEEPMS_HIGHRES=1 test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0 -test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0 +test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0 current_time_ms_from_date() { - if [ $LOOPSLEEPMS_HIGHRES -eq 0 ] - then - now_ms="$($LOOPSLEEP_DATE +'%s')000" - else - now_ms="$(( $( $LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000' ) ))" - fi + if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then + now_ms="$($LOOPSLEEP_DATE +'%s')000" + else + now_ms="$(($($LOOPSLEEP_DATE +'%s * 1000 + %-N / 1000000')))" + fi } # ----------------------------------------------------------------------------- @@ -32,55 +30,49 @@ current_time_ms_from_uptime_started="${now_ms}" current_time_ms_from_uptime_last="${now_ms}" current_time_ms_from_uptime_first=0 current_time_ms_from_uptime() { - local up rest arr=() n - - read up rest &2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_from_date - current_time_ms_accuracy=1 - return - fi - - arr=(${up//./ }) - - if [ ${#arr[1]} -lt 1 ] - then - n="${arr[0]}000" - elif [ ${#arr[1]} -lt 2 ] - then - n="${arr[0]}${arr[1]}00" - elif [ ${#arr[1]} -lt 3 ] - then - n="${arr[0]}${arr[1]}0" - else - n="${arr[0]}${arr[1]}" - fi - - now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n)) - - if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ] - then - echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_from_date - current_time_ms_accuracy=1 - fi - - current_time_ms_from_uptime_last="${now_ms}" + local up rest arr=() n + + read up rest &2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()." 
+ current_time_ms="current_time_ms_from_date" + current_time_ms_from_date + current_time_ms_accuracy=1 + return + fi + + arr=(${up//./ }) + + if [ ${#arr[1]} -lt 1 ]; then + n="${arr[0]}000" + elif [ ${#arr[1]} -lt 2 ]; then + n="${arr[0]}${arr[1]}00" + elif [ ${#arr[1]} -lt 3 ]; then + n="${arr[0]}${arr[1]}0" + else + n="${arr[0]}${arr[1]}" + fi + + now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n)) + + if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]; then + echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()." + current_time_ms="current_time_ms_from_date" + current_time_ms_from_date + current_time_ms_accuracy=1 + fi + + current_time_ms_from_uptime_last="${now_ms}" } current_time_ms_from_uptime current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))" current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}" current_time_ms="current_time_ms_from_uptime" current_time_ms_accuracy=10 -if [ "${current_time_ms_from_uptime_first}" -eq 0 ] - then - echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()." - current_time_ms="current_time_ms_from_date" - current_time_ms_accuracy=1 +if [ "${current_time_ms_from_uptime_first}" -eq 0 ]; then + echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()." + current_time_ms="current_time_ms_from_date" + current_time_ms_accuracy=1 fi # ----------------------------------------------------------------------------- @@ -94,55 +86,48 @@ mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo" [ -p "${mysleep_fifo}" ] && mysleep="mysleep_read" mysleep_read() { - read -t "${1}" <>"${mysleep_fifo}" - ret=$? - if [ $ret -le 128 ] - then - echo >&2 "$0: Cannot use read for sleeping (return code ${ret})." - mysleep="sleep" - ${mysleep} "${1}" - fi + read -t "${1}" <>"${mysleep_fifo}" + ret=$? + if [ $ret -le 128 ]; then + echo >&2 "$0: Cannot use read for sleeping (return code ${ret})." + mysleep="sleep" + ${mysleep} "${1}" + fi } # ----------------------------------------------------------------------------- # use bash loadable module for sleep mysleep_builtin() { - builtin sleep "${1}" - ret=$? - if [ $ret -ne 0 ] - then - echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})." - mysleep="sleep" - ${mysleep} "${1}" - fi + builtin sleep "${1}" + ret=$? + if [ $ret -ne 0 ]; then + echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})." + mysleep="sleep" + ${mysleep} "${1}" + fi } -if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] +0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ] - then - # enable modules only for bash version 3+ - - for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash" - do - [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue - - # check for sleep - for bash_module_sleep in "sleep" "sleep.so" - do - if [ -f "${bash_modules_path}/${bash_module_sleep}" ] - then - if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null - then - mysleep="mysleep_builtin" - # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep" - break - fi - fi - - done - - [ ! 
-z "${mysleep}" ] && break - done +if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] + 0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]; then + # enable modules only for bash version 3+ + + for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"; do + [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue + + # check for sleep + for bash_module_sleep in "sleep" "sleep.so"; do + if [ -f "${bash_modules_path}/${bash_module_sleep}" ]; then + if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null; then + mysleep="mysleep_builtin" + # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep" + break + fi + fi + + done + + [ ! -z "${mysleep}" ] && break + done fi # ----------------------------------------------------------------------------- @@ -150,7 +135,6 @@ fi [ -z "${mysleep}" ] && mysleep="sleep" - # ----------------------------------------------------------------------------- # this function is used to sleep a fraction of a second # it calculates the difference between every time is called @@ -163,60 +147,58 @@ LOOPSLEEPMS_LASTSLEEP=0 LOOPSLEEPMS_LASTWORK=0 loopsleepms() { - local tellwork=0 t="${1}" div s m now mstosleep - - if [ "${t}" = "tellwork" ] - then - tellwork=1 - shift - t="${1}" - fi - - # $t = the time in seconds to wait - - # if high resolution is not supported - # just sleep the time requested, in seconds - if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ] - then - sleep ${t} - return - fi - - # get the current time, in ms in ${now_ms} - ${current_time_ms} - - # calculate ms since last run - [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && \ - LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy)) - # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms" - - # remember this run - LOOPSLEEPMS_LASTRUN=${now_ms} - - # calculate the next run - LOOPSLEEPMS_NEXTRUN=$(( ( now_ms - ( now_ms % ( t * 1000 ) ) ) + ( t * 1000 ) )) - - # calculate ms to sleep - mstosleep=$(( LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy )) - # echo "# mstosleep is $mstosleep ms" - - # if we are too slow, sleep some time - test ${mstosleep} -lt 200 && mstosleep=200 - - s=$(( mstosleep / 1000 )) - m=$(( mstosleep - (s * 1000) )) - [ "${m}" -lt 100 ] && m="0${m}" - [ "${m}" -lt 10 ] && m="0${m}" - - test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms" - - # echo "# sleeping ${s}.${m}" - # echo - ${mysleep} ${s}.${m} - - # keep the values we need - # for our next run - LOOPSLEEPMS_LASTSLEEP=$mstosleep + local tellwork=0 t="${1}" div s m now mstosleep + + if [ "${t}" = "tellwork" ]; then + tellwork=1 + shift + t="${1}" + fi + + # $t = the time in seconds to wait + + # if high resolution is not supported + # just sleep the time requested, in seconds + if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]; then + sleep ${t} + return + fi + + # get the current time, in ms in ${now_ms} + ${current_time_ms} + + # calculate ms since last run + [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] && + LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy)) + # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms" + + # remember this run + LOOPSLEEPMS_LASTRUN=${now_ms} + + # calculate the next run + 
LOOPSLEEPMS_NEXTRUN=$(((now_ms - (now_ms % (t * 1000))) + (t * 1000))) + + # calculate ms to sleep + mstosleep=$((LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy)) + # echo "# mstosleep is $mstosleep ms" + + # if we are too slow, sleep some time + test ${mstosleep} -lt 200 && mstosleep=200 + + s=$((mstosleep / 1000)) + m=$((mstosleep - (s * 1000))) + [ "${m}" -lt 100 ] && m="0${m}" + [ "${m}" -lt 10 ] && m="0${m}" + + test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms" + + # echo "# sleeping ${s}.${m}" + # echo + ${mysleep} ${s}.${m} + + # keep the values we need + # for our next run + LOOPSLEEPMS_LASTSLEEP=$mstosleep } # test it diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md index cd8adf0a2..a9513e9fe 100644 --- a/collectors/charts.d.plugin/mem_apps/README.md +++ b/collectors/charts.d.plugin/mem_apps/README.md @@ -1,2 +1,6 @@ +# mem_apps + > THIS MODULE IS OBSOLETE. -> USE APPS.PLUGIN. +> USE [APPS.PLUGIN](../../apps.plugin). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmem_apps%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh index a13dc71f1..b9b84a467 100644 --- a/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh +++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh @@ -20,8 +20,7 @@ mem_apps_check() { # - 0 to enable the chart # - 1 to disable the chart - if [ -z "$mem_apps_apps" ] - then + if [ -z "$mem_apps_apps" ]; then error "manual configuration required: please set mem_apps_apps='command1 command2 ...' in $confd/mem_apps_apps.conf" return 1 fi @@ -35,8 +34,7 @@ mem_apps_create() { echo "CHART chartsd_apps.mem '' 'Apps Memory' MB apps apps.mem stacked 20000 $mem_apps_update_every" local x= - for x in $mem_apps_apps - do + for x in $mem_apps_apps; do echo "DIMENSION $x $x absolute 1 1024" # this string is needed later in the update() function @@ -52,9 +50,10 @@ mem_apps_update() { # remember: KEEP IT SIMPLE AND SHORT echo "BEGIN chartsd_apps.mem" - ps -o comm,rss -C "$mem_apps_apps" |\ - grep -v "^COMMAND" |\ - ( sed -e "s/ \+/ /g" -e "s/ /+=/g"; + ps -o comm,rss -C "$mem_apps_apps" | + grep -v "^COMMAND" | + ( + sed -e "s/ \+/ /g" -e "s/ /+=/g" echo "$mem_apps_bc_finalze" ) | bc echo "END" diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md index 6765b53ab..e52449a4f 100644 --- a/collectors/charts.d.plugin/mysql/README.md +++ b/collectors/charts.d.plugin/mysql/README.md @@ -1,8 +1,8 @@ -> THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT - # mysql +> THIS MODULE IS OBSOLETE. 
+> USE [THE PYTHON ONE](../../python.d.plugin/mysql) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + The plugin will monitor one or more mysql servers It will produce the following charts: @@ -79,3 +79,5 @@ If no configuration is given, the plugin will attempt to connect to mysql server --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/mysql/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh index 37e8e2a7c..e1207dc9a 100644 --- a/collectors/charts.d.plugin/mysql/mysql.chart.sh +++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh @@ -27,19 +27,19 @@ mysql_get() { #arr=($(run "${@}" -e "SHOW GLOBAL STATUS WHERE value REGEXP '^[0-9]';" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)" )) #arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | egrep "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^ ]+\s[0-9]" )) # shellcheck disable=SC2207 - arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+" )) + arr=($(run "${@}" -N -e "SHOW GLOBAL STATUS;" | grep -E "^(Bytes|Slow_|Que|Handl|Table|Selec|Sort_|Creat|Conne|Abort|Binlo|Threa|Innod|Qcach|Key_|Open)[^[:space:]]+[[:space:]]+[0-9]+")) IFS="${oIFS}" [ "${#arr[@]}" -lt 3 ] && return 1 local end=${#arr[@]} - for ((i=2;i THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/nginx) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/nginx/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh index 14dda0832..812de2cbb 100644 --- a/collectors/charts.d.plugin/nginx/nginx.chart.sh +++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh @@ -32,16 +32,15 @@ nginx_get() { # shellcheck disable=SC2181 if [ $? 
-ne 0 ] || [ "${#nginx_response[@]}" -eq 0 ]; then return 1; fi - if [ "${nginx_response[0]}" != "Active" ] ||\ - [ "${nginx_response[1]}" != "connections:" ] ||\ - [ "${nginx_response[3]}" != "server" ] ||\ - [ "${nginx_response[4]}" != "accepts" ] ||\ - [ "${nginx_response[5]}" != "handled" ] ||\ - [ "${nginx_response[6]}" != "requests" ] ||\ - [ "${nginx_response[10]}" != "Reading:" ] ||\ - [ "${nginx_response[12]}" != "Writing:" ] ||\ - [ "${nginx_response[14]}" != "Waiting:" ] - then + if [ "${nginx_response[0]}" != "Active" ] || + [ "${nginx_response[1]}" != "connections:" ] || + [ "${nginx_response[3]}" != "server" ] || + [ "${nginx_response[4]}" != "accepts" ] || + [ "${nginx_response[5]}" != "handled" ] || + [ "${nginx_response[6]}" != "requests" ] || + [ "${nginx_response[10]}" != "Reading:" ] || + [ "${nginx_response[12]}" != "Writing:" ] || + [ "${nginx_response[14]}" != "Waiting:" ]; then error "Invalid response from nginx server: ${nginx_response[*]}" return 1 fi @@ -54,14 +53,13 @@ nginx_get() { nginx_writing="${nginx_response[13]}" nginx_waiting="${nginx_response[15]}" - if [ -z "${nginx_active_connections}" ] ||\ - [ -z "${nginx_accepts}" ] ||\ - [ -z "${nginx_handled}" ] ||\ - [ -z "${nginx_requests}" ] ||\ - [ -z "${nginx_reading}" ] ||\ - [ -z "${nginx_writing}" ] ||\ - [ -z "${nginx_waiting}" ] - then + if [ -z "${nginx_active_connections}" ] || + [ -z "${nginx_accepts}" ] || + [ -z "${nginx_handled}" ] || + [ -z "${nginx_requests}" ] || + [ -z "${nginx_reading}" ] || + [ -z "${nginx_writing}" ] || + [ -z "${nginx_waiting}" ]; then error "empty values got from nginx server: ${nginx_response[*]}" return 1 fi @@ -74,8 +72,7 @@ nginx_check() { nginx_get # shellcheck disable=2181 - if [ $? -ne 0 ] - then + if [ $? -ne 0 ]; then # shellcheck disable=SC2154 error "cannot find stub_status on URL '${nginx_url}'. Please set nginx_url='http://nginx.server/stub_status' in $confd/nginx.conf" return 1 diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md index 71906f55a..3e1699363 100644 --- a/collectors/charts.d.plugin/nut/README.md +++ b/collectors/charts.d.plugin/nut/README.md @@ -57,3 +57,5 @@ nut_update_every=2 ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fnut%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh index 7e252f325..933d3561d 100644 --- a/collectors/charts.d.plugin/nut/nut.chart.sh +++ b/collectors/charts.d.plugin/nut/nut.chart.sh @@ -34,8 +34,7 @@ nut_get_all() { nut_get() { run -t $nut_timeout upsc "$1" - if [ "${nut_clients_chart}" -eq "1" ] - then + if [ "${nut_clients_chart}" -eq "1" ]; then printf "ups.connected_clients: " run -t $nut_timeout upsc -c "$1" | wc -l fi @@ -51,27 +50,23 @@ nut_check() { require_cmd upsc || return 1 - [ -z "$nut_ups" ] && nut_ups="$( nut_get_all )" + [ -z "$nut_ups" ] && nut_ups="$(nut_get_all)" - for x in $nut_ups - do + for x in $nut_ups; do nut_get "$x" >/dev/null # shellcheck disable=SC2181 - if [ $? -eq 0 ] - then - if [ ! -z "${nut_names[${x}]}" ] - then - nut_ids[$x]="$( fixid "${nut_names[${x}]}" )" + if [ $? -eq 0 ]; then + if [ ! 
-z "${nut_names[${x}]}" ]; then + nut_ids[$x]="$(fixid "${nut_names[${x}]}")" else - nut_ids[$x]="$( fixid "$x" )" + nut_ids[$x]="$(fixid "$x")" fi continue fi error "cannot get information for NUT UPS '$x'." done - if [ ${#nut_ids[@]} -eq 0 ] - then + if [ ${#nut_ids[@]} -eq 0 ]; then # shellcheck disable=SC2154 error "Cannot find UPSes - please set nut_ups='ups_name' in $confd/nut.conf" return 1 @@ -84,8 +79,7 @@ nut_create() { # create the charts local x - for x in "${nut_ids[@]}" - do + for x in "${nut_ids[@]}"; do cat < THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/phpfpm) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh index 1af7910bc..b1edb2373 100644 --- a/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh +++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh @@ -44,17 +44,7 @@ phpfpm_get() { return 1 fi - if [[ "${phpfpm_response[0]}" != "pool:" \ - || "${phpfpm_response[2]}" != "process" \ - || "${phpfpm_response[5]}" != "start" \ - || "${phpfpm_response[12]}" != "accepted" \ - || "${phpfpm_response[15]}" != "listen" \ - || "${phpfpm_response[16]}" != "queue:" \ - || "${phpfpm_response[26]}" != "idle" \ - || "${phpfpm_response[29]}" != "active" \ - || "${phpfpm_response[32]}" != "total" \ - ]] - then + if [[ ${phpfpm_response[0]} != "pool:" || ${phpfpm_response[2]} != "process" || ${phpfpm_response[5]} != "start" || ${phpfpm_response[12]} != "accepted" || ${phpfpm_response[15]} != "listen" || ${phpfpm_response[16]} != "queue:" || ${phpfpm_response[26]} != "idle" || ${phpfpm_response[29]} != "active" || ${phpfpm_response[32]} != "total" ]]; then error "invalid response from phpfpm status server: ${phpfpm_response[*]}" return 1 fi @@ -71,27 +61,13 @@ phpfpm_get() { phpfpm_total_processes="${phpfpm_response[34]}" phpfpm_max_active_processes="${phpfpm_response[38]}" phpfpm_max_children_reached="${phpfpm_response[42]}" - if [ "${phpfpm_response[43]}" == "slow" ] - then - phpfpm_slow_requests="${phpfpm_response[45]}" + if [ "${phpfpm_response[43]}" == "slow" ]; then + phpfpm_slow_requests="${phpfpm_response[45]}" else - phpfpm_slow_requests="-1" + phpfpm_slow_requests="-1" fi - if [[ -z "${phpfpm_pool}" \ - || -z "${phpfpm_start_time}" \ - || -z "${phpfpm_start_since}" \ - || -z "${phpfpm_accepted_conn}" \ - || -z "${phpfpm_listen_queue}" \ - || -z "${phpfpm_max_listen_queue}" \ - || -z "${phpfpm_listen_queue_len}" \ - || -z "${phpfpm_idle_processes}" \ - || -z "${phpfpm_active_processes}" \ - || -z "${phpfpm_total_processes}" \ - || -z "${phpfpm_max_active_processes}" \ - || -z "${phpfpm_max_children_reached}" \ - ]] - then + if [[ -z ${phpfpm_pool} || -z ${phpfpm_start_time} || -z ${phpfpm_start_since} || -z ${phpfpm_accepted_conn} || -z ${phpfpm_listen_queue} || -z ${phpfpm_max_listen_queue} || -z ${phpfpm_listen_queue_len} || -z ${phpfpm_idle_processes} || -z ${phpfpm_active_processes} || -z ${phpfpm_total_processes} || -z ${phpfpm_max_active_processes} || -z ${phpfpm_max_children_reached} ]]; then error "empty values got from phpfpm status server: 
${phpfpm_response[*]}" return 1 fi @@ -106,8 +82,7 @@ phpfpm_check() { fi local m - for m in "${!phpfpm_urls[@]}" - do + for m in "${!phpfpm_urls[@]}"; do phpfpm_get "${phpfpm_curl_opts[$m]}" "${phpfpm_urls[$m]}" # shellcheck disable=SC2181 if [ $? -ne 0 ]; then @@ -133,8 +108,7 @@ phpfpm_check() { # _create is called once, to create the charts phpfpm_create() { local m - for m in "${!phpfpm_urls[@]}" - do + for m in "${!phpfpm_urls[@]}"; do cat < THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT - # postfix +> THIS MODULE IS OBSOLETE. +> USE [THE PYTHON ONE](../../python.d.plugin/postfix) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + The plugin will collect the postfix queue size. It will create two charts: @@ -24,3 +24,5 @@ postfix_update_every=15 ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/postfix/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh index 8cb938ce1..ff59db9fe 100644 --- a/collectors/charts.d.plugin/postfix/postfix.chart.sh +++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh @@ -22,14 +22,12 @@ postfix_check() { # - 1 to disable the chart # try to find the postqueue executable - if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ] - then + if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then # shellcheck disable=SC2230 postfix_postqueue="$(which postqueue 2>/dev/null || command -v postqueue 2>/dev/null)" fi - if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ] - then + if [ -z "$postfix_postqueue" ] || [ ! -x "$postfix_postqueue" ]; then # shellcheck disable=SC2154 error "cannot find postqueue. Please set 'postfix_postqueue=/path/to/postqueue' in $confd/postfix.conf" return 1 @@ -39,7 +37,7 @@ postfix_check() { } postfix_create() { -cat < THIS MODULE IS OBSOLETE. -> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/sensors) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT > Unlike the python one, this module can collect temperature on RPi. -# sensors The plugin will provide charts for all configured system sensors @@ -50,3 +51,5 @@ sensors_excluded=() ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh index 54368f1e0..b92187772 100644 --- a/collectors/charts.d.plugin/sensors/sensors.chart.sh +++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh @@ -38,8 +38,7 @@ sensors_find_all_files() { sensors_find_all_dirs() { # shellcheck disable=SC2162 - sensors_find_all_files "$1" | while read - do + sensors_find_all_files "$1" | while read; do dirname "$REPLY" done | sort -u } @@ -51,7 +50,7 @@ sensors_check() { # - 0 to enable the chart # - 1 to disable the chart - [ -z "$( sensors_find_all_files "$sensors_sys_dir" )" ] && error "no sensors found in '$sensors_sys_dir'." 
&& return 1 + [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1 return 0 } @@ -60,15 +59,14 @@ sensors_check_files() { # also remove not needed sensors local f v excluded - for f in "$@" - do + for f in "$@"; do [ ! -f "$f" ] && continue for ex in "${sensors_excluded[@]}"; do [[ $f =~ .*$ex$ ]] && excluded='1' && break done - [ "$excluded" != "1" ] && v="$( cat "$f" )" || v=0 - v=$(( v + 1 - 1 )) + [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0 + v=$((v + 1 - 1)) [ $v -ne 0 ] && echo "$f" && continue excluded= @@ -81,15 +79,14 @@ sensors_check_temp_type() { # disabled sensors have the value 0 local f t v - for f in "$@" - do + for f in "$@"; do # shellcheck disable=SC2001 - t=$( echo "$f" | sed "s|_input$|_type|g" ) + t=$(echo "$f" | sed "s|_input$|_type|g") [ "$f" = "$t" ] && echo "$f" && continue [ ! -f "$t" ] && echo "$f" && continue - v="$( cat "$t" )" - v=$(( v + 1 - 1 )) + v="$(cat "$t")" + v=$((v + 1 - 1)) [ $v -ne 0 ] && echo "$f" && continue error "$f is disabled" @@ -105,120 +102,119 @@ sensors_create() { # - the highest speed we can achieve - [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {" - for path in $( sensors_find_all_dirs "$sensors_sys_dir" | sort -u ) - do - dir=$( basename "$path" ) + for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do + dir=$(basename "$path") device= subsystem= id= type= name= - [ -h "$path/device" ] && device=$( readlink -f "$path/device" ) - [ ! -z "$device" ] && device=$( basename "$device" ) + [ -h "$path/device" ] && device=$(readlink -f "$path/device") + [ ! -z "$device" ] && device=$(basename "$device") [ -z "$device" ] && device="$dir" - [ -h "$path/subsystem" ] && subsystem=$( readlink -f "$path/subsystem" ) - [ ! -z "$subsystem" ] && subsystem=$( basename "$subsystem" ) + [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem") + [ ! 
-z "$subsystem" ] && subsystem=$(basename "$subsystem") [ -z "$subsystem" ] && subsystem="$dir" - [ -f "$path/name" ] && name=$( cat "$path/name" ) + [ -f "$path/name" ] && name=$(cat "$path/name") [ -z "$name" ] && name="$dir" - [ -f "$path/type" ] && type=$( cat "$path/type" ) + [ -f "$path/type" ] && type=$(cat "$path/type") [ -z "$type" ] && type="$dir" - id="$( fixid "$device.$subsystem.$dir" )" + id="$(fixid "$device.$subsystem.$dir")" debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'" - for mode in temperature voltage fans power current energy humidity - do + for mode in temperature voltage fans power current energy humidity; do files= multiplier=1 divisor=1 algorithm="absolute" case $mode in - temperature) - files="$( ls "$path"/temp*_input 2>/dev/null; ls "$path/temp" 2>/dev/null )" - files="$( sensors_check_files "$files" )" - files="$( sensors_check_temp_type "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\"" - divisor=1000 - ;; - - voltage) - files="$( ls "$path"/in*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\"" - divisor=1000 - ;; - - current) - files="$( ls "$path"/curr*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\"" - divisor=1000 - ;; - - power) - files="$( ls "$path"/power*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\"" - divisor=1000000 - ;; - - fans) - files="$( ls "$path"/fan*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\"" - ;; - - energy) - files="$( ls "$path"/energy*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\"" - algorithm="incremental" - divisor=1000000 - ;; - - humidity) - files="$( ls "$path"/humidity*_input 2>/dev/null )" - files="$( sensors_check_files "$files" )" - [ -z "$files" ] && continue - echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every" - echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\"" - divisor=1000 - ;; - - *) - continue - ;; + temperature) + files="$( + ls "$path"/temp*_input 2>/dev/null + ls "$path/temp" 2>/dev/null 
+ )" + files="$(sensors_check_files "$files")" + files="$(sensors_check_temp_type "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\"" + divisor=1000 + ;; + + voltage) + files="$(ls "$path"/in*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\"" + divisor=1000 + ;; + + current) + files="$(ls "$path"/curr*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\"" + divisor=1000 + ;; + + power) + files="$(ls "$path"/power*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\"" + divisor=1000000 + ;; + + fans) + files="$(ls "$path"/fan*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\"" + ;; + + energy) + files="$(ls "$path"/energy*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\"" + algorithm="incremental" + divisor=1000000 + ;; + + humidity) + files="$(ls "$path"/humidity*_input 2>/dev/null)" + files="$(sensors_check_files "$files")" + [ -z "$files" ] && continue + echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every" + echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\"" + divisor=1000 + ;; + + *) + continue + ;; esac - for x in $files - do + for x in $files; do file="$x" - fid="$( fixid "$file" )" - lfile="$( basename "$file" | sed "s|_input$|_label|g" )" - labelname="$( basename "$file" | sed "s|_input$||g" )" + fid="$(fixid "$file")" + lfile="$(basename "$file" | sed "s|_input$|_label|g")" + labelname="$(basename "$file" | sed "s|_input$||g")" - if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ] - then - labelname="$( cat "$path/$lfile" )" + if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then + labelname="$(cat "$path/$lfile")" fi echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor" @@ -252,4 +248,3 @@ sensors_update() { return 0 } - diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md index 0934ccfcf..cfb61790a 100644 --- a/collectors/charts.d.plugin/squid/README.md +++ b/collectors/charts.d.plugin/squid/README.md @@ -1,9 +1,8 @@ -> THIS MODULE IS OBSOLETE. 
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT - - # squid +> THIS MODULE IS OBSOLETE. +> USE [THE PYTHON ONE](../../python.d.plugin/squid) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + The plugin will monitor a squid server. It will produce 4 charts: @@ -64,3 +63,5 @@ squid_update_every=5 ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/squid/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh index cf5d1d78a..ebddb32c6 100644 --- a/collectors/charts.d.plugin/squid/squid.chart.sh +++ b/collectors/charts.d.plugin/squid/squid.chart.sh @@ -25,13 +25,10 @@ squid_get_stats() { squid_autodetect() { local host="127.0.0.1" port url x - for port in 3128 8080 - do - for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters" - do + for port in 3128 8080; do + for url in "cache_object://$host:$port/counters" "/squid-internal-mgr/counters"; do x=$(squid_get_stats_internal "$host" "$port" "$url" | grep client_http.requests) - if [ ! -z "$x" ] - then + if [ ! -z "$x" ]; then squid_host="$host" squid_port="$port" squid_url="$url" @@ -50,8 +47,7 @@ squid_check() { require_cmd sed || return 1 require_cmd egrep || return 1 - if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ] - then + if [ -z "$squid_host" ] || [ -z "$squid_port" ] || [ -z "$squid_url" ]; then squid_autodetect || return 1 fi @@ -59,8 +55,7 @@ squid_check() { local x x="$(squid_get_stats | grep client_http.requests)" # shellcheck disable=SC2181 - if [ ! $? -eq 0 ] || [ -z "$x" ] - then + if [ ! $? -eq 0 ] || [ -z "$x" ]; then error "cannot fetch URL '$squid_url' by connecting to $squid_host:$squid_port. Please set squid_url='url' and squid_host='host' and squid_port='port' in $confd/squid.conf" return 1 fi @@ -93,7 +88,6 @@ EOF return 0 } - squid_update() { # the first argument to this function is the microseconds since last update # pass this parameter to the BEGIN statement (see bellow). @@ -114,8 +108,8 @@ squid_update() { # even if something goes wrong, no other code can be executed # shellcheck disable=SC1117 - eval "$(squid_get_stats |\ - sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" |\ + eval "$(squid_get_stats | + sed -e "s/ \+/ /g" -e "s/\./_/g" -e "s/^\([a-z0-9_]\+\) *= *\([0-9]\+\)$/local squid_\1=\2/g" | grep -E "^local squid_(client_http|server_all)_[a-z0-9_]+=[0-9]+$")" # write the result of the work. diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md index d82951aac..843378604 100644 --- a/collectors/charts.d.plugin/tomcat/README.md +++ b/collectors/charts.d.plugin/tomcat/README.md @@ -1,2 +1,6 @@ +# tomcat + > THIS MODULE IS OBSOLETE. 
-> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT +> USE [THE PYTHON ONE](../../python.d.plugin/tomcat) - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fcharts.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh index 294487b8b..9ca75e63e 100644 --- a/collectors/charts.d.plugin/tomcat/tomcat.chart.sh +++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh @@ -32,24 +32,23 @@ tomcat_priority=60000 # will be in the proper units tomcat_decimal_detail=1000000 -# used by volume chart to convert bytes to KB -tomcat_decimal_KB_detail=1000 +# used by volume chart to convert bytes to kB +tomcat_decimal_kB_detail=1000 tomcat_check() { require_cmd xmlstarlet || return 1 - # check if url, username, passwords are set if [ -z "${tomcat_url}" ]; then - error "tomcat url is unset or set to the empty string" + error "tomcat url is unset or set to the empty string" return 1 fi if [ -z "${tomcat_user}" ]; then # check backwards compatibility # shellcheck disable=SC2154 if [ -z "${tomcatUser}" ]; then - error "tomcat user is unset or set to the empty string" + error "tomcat user is unset or set to the empty string" return 1 else tomcat_user="${tomcatUser}" @@ -59,7 +58,7 @@ tomcat_check() { # check backwards compatibility # shellcheck disable=SC2154 if [ -z "${tomcatPassword}" ]; then - error "tomcat password is unset or set to the empty string" + error "tomcat password is unset or set to the empty string" return 1 else tomcat_password="${tomcatPassword}" @@ -69,8 +68,7 @@ tomcat_check() { # check if we can get to tomcat's status page tomcat_get # shellcheck disable=2181 - if [ $? -ne 0 ] - then + if [ $? -ne 0 ]; then error "cannot get to status page on URL '${tomcat_url}'. Please make sure tomcat url, username and password are correct." 
return 1 fi @@ -84,8 +82,12 @@ tomcat_check() { tomcat_get() { # collect tomcat values - tomcat_port="$(IFS=/ read -ra a <<< "$tomcat_url"; hostport=${a[2]}; echo "${hostport#*:}")" - mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" |\ + tomcat_port="$( + IFS=/ read -ra a <<<"$tomcat_url" + hostport=${a[2]} + echo "${hostport#*:}" + )" + mapfile -t lines < <(run curl -u "$tomcat_user":"$tomcat_password" -Ss ${tomcat_curl_opts} "$tomcat_url" | run xmlstarlet sel \ -t -m "/status/jvm/memory" -v @free \ -n -m "/status/connector[@name='\"http-bio-$tomcat_port\"']/threadInfo" -v @currentThreadCount \ @@ -107,8 +109,8 @@ tomcat_create() { cat <&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/checks.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = 
-DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = 
@has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/checks.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md index 503b96ada..461e3ba8e 100644 --- a/collectors/checks.plugin/README.md +++ b/collectors/checks.plugin/README.md @@ -1,3 +1,5 @@ -# Netdata internal checks +# checks.plugin A debugging plugin (by default it is disabled) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fchecks.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/cups.plugin/Makefile.am b/collectors/cups.plugin/Makefile.am new file mode 100644 index 000000000..ca4d4ddd7 --- /dev/null +++ b/collectors/cups.plugin/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md new file mode 100644 index 000000000..7baf88559 --- /dev/null +++ b/collectors/cups.plugin/README.md @@ -0,0 +1,49 @@ +# cups.plugin + +`cups.plugin` collects Common Unix Printing System (CUPS) metrics. + +## Prerequisites + +This plugin needs a running local CUPS daemon (`cupsd`) and requires no configuration. + +## Charts + +`cups.plugin` provides one common section `destinations` and one section per destination. + +> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (https://www.cups.org/doc/cupspm.html#working-with-destinations). + +The common section provides these charts: + +1. **destinations by state** + * idle + * printing + * stopped + +2. **destinations by options** + * total + * accepting jobs + * shared + +3. **total job number by status** + * pending + * processing + * held + +4. **total job size by status** + * pending + * processing + * held + +For each destination the plugin provides these charts: + +1. **job number by status** + * pending + * held + * processing + +2. **job size by status** + * pending + * held + * processing + +At the moment only the job states pending, processing, and held are reported, because there is no scalable way to collect stopped, canceled, aborted, and completed jobs.
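Since `cups.plugin` collects everything from the local daemon, the standard CUPS command-line tools can be used to preview the data behind the charts described above. A minimal sketch, assuming the CUPS client tools (`lpstat`) are installed; the exact output wording may vary between CUPS versions:

```
#!/usr/bin/env bash
# Preview the raw data behind the cups.plugin charts.
# Assumes the standard CUPS client tools (lpstat) are installed;
# the grep pattern matches the usual "scheduler is running" message.

# is the local CUPS scheduler (cupsd) reachable?
lpstat -r | grep -q "scheduler is running" || { echo "cupsd is not running" >&2; exit 1; }

# destinations and their states (idle / printing / stopped),
# i.e. the input of the "destinations by state" chart
lpstat -p

# destinations accepting jobs, part of "destinations by options"
lpstat -a

# active jobs per destination, the input of the per-destination job charts
lpstat -o
```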
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c new file mode 100644 index 000000000..7fbba2c46 --- /dev/null +++ b/collectors/cups.plugin/cups_plugin.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* + * netdata cups.plugin + * (C) Copyright 2017-2018 Simon Nagl + * Released under GPL v3+ + */ + +#include "../../libnetdata/libnetdata.h" +#include <limits.h> + +// callback required by fatal() +void netdata_cleanup_and_exit(int ret) { exit(ret); } + +void send_statistics( const char *action, const char *action_result, const char *action_data) { (void) action; (void) action_result; (void) action_data; return; } + +// callbacks required by popen() +void signals_block(void) {}; +void signals_unblock(void) {}; +void signals_reset(void) {}; + +// callback required by eval() +int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) { (void)variable; (void)hash; (void)rc; (void)result; return 0; }; + +// required by get_system_cpus() +char *netdata_configured_host_prefix = ""; + +// Variables + +static int debug = 0; + +static int netdata_update_every = 1; +static int netdata_priority = 100004; + + +#ifdef HAVE_CUPS +#include <cups/cups.h> + +http_t *http; // connection to the cups daemon + +/* + * Used to aggregate job metrics for a destination (and all destinations). + */ +struct job_metrics { + int is_collected; // flag if this was collected in the current cycle + + int num_pending; + int num_processing; + int num_held; + + int size_pending; // in kilobyte + int size_processing; // in kilobyte + int size_held; // in kilobyte +}; +DICTIONARY *dict_dest_job_metrics = NULL; +struct job_metrics global_job_metrics; + +int num_dest_total; +int num_dest_accepting_jobs; +int num_dest_shared; + +int num_dest_idle; +int num_dest_printing; +int num_dest_stopped; + +void print_help() { + fprintf(stderr, + "\n" + "netdata cups.plugin %s\n" + "\n" + "Copyright (C) 2017-2018 Simon Nagl \n" + "Released under GNU General Public License v3+.\n" + "All rights reserved.\n" + "\n" + "This program is a data collector plugin for netdata.\n" + "\n" + "SYNOPSIS: cups.plugin [-d][-h][-v] COLLECTION_FREQUENCY\n" + "\n" + "Options:" + "\n" + " COLLECTION_FREQUENCY data collection frequency in seconds\n" + "\n" + " -d enable verbose output\n" + " default: disabled\n" + "\n" + " -v print version and exit\n" + "\n" + " -h print this message and exit\n" + "\n", + VERSION); +} + +void parse_command_line(int argc, char **argv) { + int i; + int freq = 0; + int update_every_found = 0; + for (i = 1; i < argc; i++) { + if (isdigit(*argv[i]) && !update_every_found) { + int n = str2i(argv[i]); + if (n > 0 && n < 86400) { + freq = n; + continue; + } + } else if (strcmp("-v", argv[i]) == 0) { + printf("cups.plugin %s\n", VERSION); + exit(0); + } else if (strcmp("-d", argv[i]) == 0) { + debug = 1; + continue; + } else if (strcmp("-h", argv[i]) == 0) { + print_help(); + exit(0); + } + + print_help(); + exit(1); + } + + if (freq >= netdata_update_every) { + netdata_update_every = freq; + } else if (freq) { + error("update frequency %d seconds is too small for CUPS. 
Using %d.", freq, netdata_update_every); + } +} + +int reset_job_metrics(void *entry, void *data) { + (void)data; + + struct job_metrics *jm = (struct job_metrics *)entry; + + jm->is_collected = 0; + jm->num_held = 0; + jm->num_pending = 0; + jm->num_processing = 0; + jm->size_held = 0; + jm->size_pending = 0; + jm->size_processing = 0; + + return 0; +} + +struct job_metrics *get_job_metrics(char *dest) { + struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest); + + if (unlikely(!jm)) { + struct job_metrics new_job_metrics; + reset_job_metrics(&new_job_metrics, NULL); + jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics)); + + printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + + printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + }; + return jm; +} + +int collect_job_metrics(char *name, void *entry, void *data) { + (void)data; + + struct job_metrics *jm = (struct job_metrics *)entry; + + if (jm->is_collected) { + printf( + "BEGIN cups.job_num_%s\n" + "SET pending = %d\n" + "SET held = %d\n" + "SET processing = %d\n" + "END\n", + name, jm->num_pending, jm->num_held, jm->num_processing); + printf( + "BEGIN cups.job_size_%s\n" + "SET pending = %d\n" + "SET held = %d\n" + "SET processing = %d\n" + "END\n", + name, jm->size_pending, jm->size_held, jm->size_processing); + } else { + printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + + printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + dictionary_del(dict_dest_job_metrics, name); + } + + return 0; +} + +void reset_metrics() { + num_dest_total = 0; + num_dest_accepting_jobs = 0; + num_dest_shared = 0; + + num_dest_idle = 0; + num_dest_printing = 0; + num_dest_stopped = 0; + + reset_job_metrics(&global_job_metrics, NULL); + dictionary_get_all(dict_dest_job_metrics, reset_job_metrics, NULL); +} + +int main(int argc, char **argv) { + + // ------------------------------------------------------------------------ + // initialization of netdata plugin + + program_name = "cups.plugin"; + + // disable syslog + error_log_syslog = 0; + + // set errors flood protection to 100 logs per hour + error_log_errors_per_period = 100; + error_log_throttle_period = 3600; + + parse_command_line(argc, argv); + + errno = 0; + + dict_dest_job_metrics = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED); + + // ------------------------------------------------------------------------ + // the main loop + + if (debug) + fprintf(stderr, "starting data collection\n"); + + time_t started_t = 
now_monotonic_sec(); + size_t iteration = 0; + usec_t step = netdata_update_every * USEC_PER_SEC; + + heartbeat_t hb; + heartbeat_init(&hb); + for (iteration = 0; 1; iteration++) + { + heartbeat_next(&hb, step); + + if (unlikely(netdata_exit)) + { + break; + } + + reset_metrics(); + + cups_dest_t *dests; + num_dest_total = cupsGetDests2(http, &dests); + + if(unlikely(num_dest_total == 0)) { + // reconnect to cups to check if the server is down. + httpClose(http); + http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL); + if(http == NULL) { + error("cups daemon is not running. Exiting!"); + exit(1); + } + } + + cups_dest_t *curr_dest = dests; + int counter = 0; + while (counter < num_dest_total) { + if (counter != 0) { + curr_dest++; + } + counter++; + + const char *printer_uri_supported = cupsGetOption("printer-uri-supported", curr_dest->num_options, curr_dest->options); + if (!printer_uri_supported) { + if(debug) + fprintf(stderr, "destination %s discovered, but not yet set up as a local printer", curr_dest->name); + continue; + } + + const char *printer_is_accepting_jobs = cupsGetOption("printer-is-accepting-jobs", curr_dest->num_options, curr_dest->options); + if (printer_is_accepting_jobs && !strcmp(printer_is_accepting_jobs, "true")) { + num_dest_accepting_jobs++; + } + + const char *printer_is_shared = cupsGetOption("printer-is-shared", curr_dest->num_options, curr_dest->options); + if (printer_is_shared && !strcmp(printer_is_shared, "true")) { + num_dest_shared++; + } + + // TODO use cupsGetIntegerOption + int printer_state = cupsGetIntegerOption("printer-state", curr_dest->num_options, curr_dest->options); + switch (printer_state) { + case 3: + num_dest_idle++; + break; + case 4: + num_dest_printing++; + break; + case 5: + num_dest_stopped++; + break; + case INT_MIN: + if(debug) + fprintf(stderr, "printer state is missing for destination %s", curr_dest->name); + break; + default: + error("Unknown printer state (%d) found.", printer_state); + break; + } + + /* + * flag job metrics to print values. + * This is needed to also report destinations with zero active jobs. 
+ */ + struct job_metrics *jm = get_job_metrics(curr_dest->name); + jm->is_collected = 1; + } + cupsFreeDests(num_dest_total, dests); + + if (unlikely(netdata_exit)) + break; + + cups_job_t *jobs, *curr_job; + int num_jobs = cupsGetJobs2(http, &jobs, NULL, 0, CUPS_WHICHJOBS_ACTIVE); + int i; + for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) { + struct job_metrics *jm = get_job_metrics(curr_job->dest); + jm->is_collected = 1; + + switch (curr_job->state) { + case IPP_JOB_PENDING: + jm->num_pending++; + jm->size_pending += curr_job->size; + global_job_metrics.num_pending++; + global_job_metrics.size_pending += curr_job->size; + break; + case IPP_JOB_HELD: + jm->num_held++; + jm->size_held += curr_job->size; + global_job_metrics.num_held++; + global_job_metrics.size_held += curr_job->size; + break; + case IPP_JOB_PROCESSING: + jm->num_processing++; + jm->size_processing += curr_job->size; + global_job_metrics.num_processing++; + global_job_metrics.size_processing += curr_job->size; + break; + default: + error("Unsupported job state (%u) found.", curr_job->state); + break; + } + } + cupsFreeJobs(num_jobs, jobs); + + dictionary_get_all_name_value(dict_dest_job_metrics, collect_job_metrics, NULL); + + static int cups_printer_by_option_created = 0; + if (unlikely(!cups_printer_by_option_created)) + { + cups_printer_by_option_created = 1; + printf("CHART cups.dest_state '' 'Destinations by state' dests overview dests stacked 100000 %i\n", netdata_update_every); + printf("DIMENSION idle '' absolute 1 1\n"); + printf("DIMENSION printing '' absolute 1 1\n"); + printf("DIMENSION stopped '' absolute 1 1\n"); + + printf("CHART cups.dest_option '' 'Destinations by option' dests overview dests line 100001 %i\n", netdata_update_every); + printf("DIMENSION total '' absolute 1 1\n"); + printf("DIMENSION acceptingjobs '' absolute 1 1\n"); + printf("DIMENSION shared '' absolute 1 1\n"); + + printf("CHART cups.job_num '' 'Total active job number' jobs overview job_num stacked 100002 %i\n", netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + + printf("CHART cups.job_size '' 'Total active job size' KB overview job_size stacked 100003 %i\n", netdata_update_every); + printf("DIMENSION pending '' absolute 1 1\n"); + printf("DIMENSION held '' absolute 1 1\n"); + printf("DIMENSION processing '' absolute 1 1\n"); + } + + printf( + "BEGIN cups.dest_state\n" + "SET idle = %d\n" + "SET printing = %d\n" + "SET stopped = %d\n" + "END\n", + num_dest_idle, num_dest_printing, num_dest_stopped); + printf( + "BEGIN cups.dest_option\n" + "SET total = %d\n" + "SET acceptingjobs = %d\n" + "SET shared = %d\n" + "END\n", + num_dest_total, num_dest_accepting_jobs, num_dest_shared); + printf( + "BEGIN cups.job_num\n" + "SET pending = %d\n" + "SET held = %d\n" + "SET processing = %d\n" + "END\n", + global_job_metrics.num_pending, global_job_metrics.num_held, global_job_metrics.num_processing); + printf( + "BEGIN cups.job_size\n" + "SET pending = %d\n" + "SET held = %d\n" + "SET processing = %d\n" + "END\n", + global_job_metrics.size_pending, global_job_metrics.size_held, global_job_metrics.size_processing); + + fflush(stdout); + + if (unlikely(netdata_exit)) + break; + + // restart check (14400 seconds) + if (now_monotonic_sec() - started_t > 14400) + break; + } + + httpClose(http); + info("CUPS process exiting"); +} + +#else // !HAVE_CUPS + +int main(int argc, char **argv) +{ + fatal("cups.plugin is not 
compiled."); +} + +#endif // !HAVE_CUPS diff --git a/collectors/diskspace.plugin/Makefile.in b/collectors/diskspace.plugin/Makefile.in deleted file mode 100644 index ceebc5455..000000000 --- a/collectors/diskspace.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/diskspace.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) 
\ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ 
-ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/diskspace.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md index f7d0e7b49..d743312c8 100644 --- a/collectors/diskspace.plugin/README.md +++ b/collectors/diskspace.plugin/README.md @@ -2,5 +2,34 @@ This plugin monitors the disk space usage of mounted disks, under Linux. +Two charts are available for every mount: + - Disk Space Usage + - Disk Files (inodes) Usage + +## configuration + +Simple patterns can be used to exclude mounts from the collected statistics, based on path or filesystem. By default, read-only mounts are not displayed. To display them, set `yes` instead of `auto` for the corresponding chart. 
+ +``` +[plugin:proc:diskspace] + # remove charts of unmounted disks = yes + # update every = 1 + # check for new mount points every = 15 + # exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* + # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl + # space usage for all disks = auto + # inodes usage for all disks = auto +``` + +Charts can be enabled/disabled for every mount separately: + +``` +[plugin:proc:diskspace:/] + # space usage = auto + # inodes usage = auto +``` + > for disk performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fdiskspace.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c index dca7c9076..77b87b093 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.c +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -190,7 +190,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO)) return; - if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected)) + if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected && m->do_space != CONFIG_BOOLEAN_YES && m->do_inodes != CONFIG_BOOLEAN_YES)) return; struct statvfs buff_statvfs; @@ -263,7 +263,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { , family , "disk.space" , title - , "GB" + , "GiB" , PLUGIN_DISKSPACE_NAME , NULL , NETDATA_CHART_PRIO_DISKSPACE_SPACE @@ -303,7 +303,7 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { , family , "disk.inodes" , title - , "Inodes" + , "inodes" , PLUGIN_DISKSPACE_NAME , NULL , NETDATA_CHART_PRIO_DISKSPACE_INODES diff --git a/collectors/fping.plugin/Makefile.in b/collectors/fping.plugin/Makefile.in deleted file mode 100644 index 67b9699b7..000000000 --- a/collectors/fping.plugin/Makefile.in +++ /dev/null @@ -1,591 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?)
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) -subdir = collectors/fping.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, 
files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" \ - "$(DESTDIR)$(libconfigdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ 
-ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - fping.plugin \ - $(NULL) - -SUFFIXES = .in -dist_plugins_SCRIPTS = \ - fping.plugin \ - $(NULL) - -dist_noinst_DATA = \ - fping.plugin.in \ - README.md \ - $(NULL) - -dist_libconfig_DATA = \ - fping.conf \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/fping.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed 
'/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_libconfigDATA \ - install-dist_pluginsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_libconfigDATA \ - uninstall-dist_pluginsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_libconfigDATA \ - install-dist_pluginsSCRIPTS install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md index a83b7912c..d5f83fdf1 100644 --- a/collectors/fping.plugin/README.md +++ b/collectors/fping.plugin/README.md @@ -94,3 +94,5 @@ That's it. netdata will detect the new plugin and start it. You can name the new plugin any name you like. Just make sure the plugin and the configuration file have the same name. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffping.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/fping.plugin/fping.plugin b/collectors/fping.plugin/fping.plugin deleted file mode 100644 index cf8f17e9a..000000000 --- a/collectors/fping.plugin/fping.plugin +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis -# GPL v3+ -# -# This plugin requires a latest version of fping. 
-# You can compile it from source, by running me with option: install - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -if [ "${1}" = "install" ] - then - [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/local/bin/fping." && exit 1 - - run() { - printf >&2 " > " - printf >&2 "%q " "${@}" - printf >&2 "\n" - "${@}" || exit 1 - } - - download() { - local curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" - [ ! -z "${curl}" ] && run curl -s -L "${1}" && return 0 - - local wget="$(which wget 2>/dev/null || command -v wget 2>/dev/null)" - [ ! -z "${wget}" ] && run wget -q -O - "${1}" && return 0 - - echo >&2 "Cannot find 'curl' or 'wget' in this system." && exit 1 - } - - [ ! -d /usr/src ] && run mkdir -p /usr/src - [ ! -d /usr/local/bin ] && run mkdir -p /usr/local/bin - - run cd /usr/src - - if [ -d fping-4.0 ] - then - run rm -rf fping-4.0 || exit 1 - fi - - download 'https://github.com/schweikert/fping/releases/download/v4.0/fping-4.0.tar.gz' | run tar -zxvpf - - [ $? -ne 0 ] && exit 1 - run cd fping-4.0 || exit 1 - - run ./configure --prefix=/usr/local - run make clean - run make - if [ -f /usr/local/bin/fping ] - then - run mv -f /usr/local/bin/fping /usr/local/bin/fping.old - fi - run mv src/fping /usr/local/bin/fping - run chown root:root /usr/local/bin/fping - run chmod 4755 /usr/local/bin/fping - echo >&2 - echo >&2 "All done, you have a compatible fping now at /usr/local/bin/fping." - echo >&2 - - fping="$(which fping 2>/dev/null || command -v fping 2>/dev/null)" - if [ "${fping}" != "/usr/local/bin/fping" ] - then - echo >&2 "You have another fping installed at: ${fping}." - echo >&2 "Please set:" - echo >&2 - echo >&2 " fping=\"/usr/local/bin/fping\"" - echo >&2 - echo >&2 "at /etc/netdata/fping.conf" - echo >&2 - fi - exit 0 -fi - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - echo "DISABLE" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- - -# store in ${plugin} the name we run under -# this allows us to copy/link fping.plugin under a different name -# to have multiple fping plugins running with different settings -plugin="${PROGRAM_NAME/.plugin/}" - - -# ----------------------------------------------------------------------------- - -# the frequency to send info to netdata -# passed by netdata as the first parameter -update_every="${1-1}" - -# the netdata configuration directory -# passed by netdata as an environment variable -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" - -# ----------------------------------------------------------------------------- -# configuration options -# can be overwritten at /etc/netdata/fping.conf - -# the fping binary to use -# we need one that can output netdata friendly info (supporting: -N) -# if you have multiple versions, put here the full filename of the right one -fping="$( which fping 2>/dev/null || command -v fping 2>/dev/null )" - -# a space 
separated list of hosts to fping -# we suggest to put names here and the IPs of these names in /etc/hosts -hosts="" - -# the time in milliseconds (1 sec = 1000 ms) -# to ping the hosts - by default 5 pings per host per iteration -ping_every="$((update_every * 1000 / 5))" - -# fping options -fping_opts="-R -b 56 -i 1 -r 0 -t 5000" - -# ----------------------------------------------------------------------------- -# load the configuration files - -for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf" -do - if [ -f "${CONFIG}" ] - then - info "Loading config file '${CONFIG}'..." - source "${CONFIG}" - [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." - else - warning "Cannot find file '${CONFIG}'." - fi -done - -if [ -z "${hosts}" ] -then - fatal "no hosts configured - nothing to do." -fi - -if [ -z "${fping}" ] -then - fatal "fping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'" -fi - -if [ ! -x "${fping}" ] -then - fatal "fping command '${fping}' is not executable - cannot proceed." -fi - -if [ ${ping_every} -lt 20 ] - then - warning "ping every was set to ${ping_every} but 20 is the minimum for non-root users. Setting it to 20 ms." - ping_every=20 -fi - -# the fping options we will use -options=( -N -l -Q ${update_every} -p ${ping_every} ${fping_opts} ${hosts} ) - -# execute fping -info "starting fping: ${fping} ${options[*]}" -exec "${fping}" "${options[@]}" - -# if we cannot execute fping, stop -fatal "command '${fping} ${options[*]}' failed to be executed (returned code $?)." diff --git a/collectors/freebsd.plugin/Makefile.in b/collectors/freebsd.plugin/Makefile.in deleted file mode 100644 index d3332677b..000000000 --- a/collectors/freebsd.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
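The removed `fping.plugin` above ends by replacing its own process with fping (`exec "${fping}" "${options[@]}"`), so netdata supervises fping directly and reads its output as the plugin protocol stream. Below is a minimal C sketch of that same exec hand-off, for illustration only: the host names are hypothetical placeholders, and the option values mirror the script's defaults (`update_every=1`, `ping_every = update_every * 1000 / 5 = 200 ms`), not a real configuration.

```
/* Illustrative sketch of the exec hand-off the removed fping.plugin
 * performs in shell; hosts and values below are hypothetical. */
#include <stdio.h>
#include <unistd.h>

int main(void) {
    /* mirrors: options=( -N -l -Q ${update_every} -p ${ping_every} ... ${hosts} ) */
    char *argv[] = {
        "fping",
        "-N", "-l",
        "-Q", "1",                        /* update_every: report once per second */
        "-p", "200",                      /* ping_every in ms (1000 / 5 pings)    */
        "host1.example", "host2.example", /* hypothetical host list               */
        NULL
    };
    execvp(argv[0], argv);  /* replaces this process; returns only on failure */
    perror("execvp fping"); /* mirrors the script's final fatal() path        */
    return 1;
}
```

Because `exec` never returns on success, the script's trailing `fatal` line only runs when fping could not be started at all, which is why the plugin prints `DISABLE` there.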
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/freebsd.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/freebsd.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md index e6302f420..237e60921 100644 --- a/collectors/freebsd.plugin/README.md +++ b/collectors/freebsd.plugin/README.md @@ -1,3 +1,5 @@ -# freebsd +# freebsd.plugin Collects resource usage and performance data on FreeBSD systems + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreebsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c index 10279aabc..81a301e4a 100644 --- a/collectors/freebsd.plugin/freebsd_devstat.c +++ b/collectors/freebsd.plugin/freebsd_devstat.c @@ -256,7 +256,7 @@ int do_kern_devstat(int update_every, usec_t dt) { disks_found = 0; - dstat = devstat_data + sizeof(long); // skip generation number + dstat = (struct devstat*)((char*)devstat_data + sizeof(long)); // skip generation number for (i = 0; i < numdevs; i++) { if (likely(do_system_io)) { @@ -360,10 +360,10 @@ int do_kern_devstat(int update_every, usec_t dt) { disk, "disk.io", "Disk I/O Bandwidth", - "kilobytes/s", - "freebsd.plugin", + "KiB/s", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_IO, + NETDATA_CHART_PRIO_DISK_IO, update_every, RRDSET_TYPE_AREA ); @@ -398,9 +398,9 @@ int do_kern_devstat(int update_every, usec_t dt) { "disk.ops", "Disk Completed I/O Operations", "operations/s", - "freebsd.plugin", - "devstat", - NETDATA_CHART_PRIO_DISK_OPS, + "freebsd.plugin", + "devstat", + NETDATA_CHART_PRIO_DISK_OPS, update_every, RRDSET_TYPE_LINE ); @@ -437,9 +437,9 @@ int do_kern_devstat(int update_every, usec_t dt) { "disk.qops", "Disk Current I/O Operations", "operations", - "freebsd.plugin", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_QOPS, + NETDATA_CHART_PRIO_DISK_QOPS, update_every, RRDSET_TYPE_LINE ); @@ -466,9 +466,9 @@ int do_kern_devstat(int update_every, usec_t dt) { "disk.util", "Disk Utilization Time", "% of time working", - "freebsd.plugin", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_UTIL, + NETDATA_CHART_PRIO_DISK_UTIL, update_every, RRDSET_TYPE_AREA ); @@ -499,9 +499,9 @@ int do_kern_devstat(int update_every, usec_t dt) { "disk.iotime", "Disk Total I/O Time", "milliseconds/s", - "freebsd.plugin", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_IOTIME, + NETDATA_CHART_PRIO_DISK_IOTIME, update_every, RRDSET_TYPE_LINE ); @@ -546,10 +546,10 @@ int do_kern_devstat(int update_every, usec_t dt) { disk, "disk.await", "Average Completed I/O Operation Time", - "ms per operation", - "freebsd.plugin", + "milliseconds/operation", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_AWAIT, + NETDATA_CHART_PRIO_DISK_AWAIT, update_every, RRDSET_TYPE_LINE ); @@ -611,10 +611,10 @@ int do_kern_devstat(int update_every, usec_t dt) { disk, "disk.avgsz", "Average Completed I/O Operation Bandwidth", - "kilobytes per operation", - "freebsd.plugin", + "KiB/operation", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_AVGSZ, + NETDATA_CHART_PRIO_DISK_AVGSZ, update_every, RRDSET_TYPE_AREA ); @@ -668,10 +668,10 @@ int do_kern_devstat(int update_every, usec_t dt) { disk, "disk.svctm", "Average Service Time", - "ms per operation", - "freebsd.plugin", + "milliseconds/operation", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_DISK_SVCTM, + NETDATA_CHART_PRIO_DISK_SVCTM, update_every, 
RRDSET_TYPE_LINE ); @@ -728,10 +728,10 @@ int do_kern_devstat(int update_every, usec_t dt) { "disk", NULL, "Disk I/O", - "kilobytes/s", - "freebsd.plugin", + "KiB/s", + "freebsd.plugin", "devstat", - NETDATA_CHART_PRIO_SYSTEM_IO, + NETDATA_CHART_PRIO_SYSTEM_IO, update_every, RRDSET_TYPE_AREA ); diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c index e15845857..ac1638ee7 100644 --- a/collectors/freebsd.plugin/freebsd_getifaddrs.c +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -144,7 +144,7 @@ int do_getifaddrs(int update_every, usec_t dt) { (void)dt; #define DEFAULT_EXLUDED_INTERFACES "lo*" -#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe*" +#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet*" #define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs" static int enable_new_interfaces = -1; @@ -156,7 +156,7 @@ int do_getifaddrs(int update_every, usec_t dt) { enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO); - + do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces", CONFIG_BOOLEAN_AUTO); do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces", @@ -229,7 +229,7 @@ int do_getifaddrs(int update_every, usec_t dt) { // -------------------------------------------------------------------- if (likely(do_bandwidth_net)) { - + iftot.ift_ibytes = iftot.ift_obytes = 0; for (ifa = ifap; ifa; ifa = ifa->ifa_next) { if (ifa->ifa_addr->sa_family != AF_LINK) @@ -239,10 +239,10 @@ int do_getifaddrs(int update_every, usec_t dt) { iftot.ift_ibytes += IFA_DATA(ibytes); iftot.ift_obytes += IFA_DATA(obytes); } - + static RRDSET *st = NULL; static RRDDIM *rd_in = NULL, *rd_out = NULL; - + if (unlikely(!st)) { st = rrdset_create_localhost("system", "net", @@ -251,23 +251,23 @@ int do_getifaddrs(int update_every, usec_t dt) { NULL, "Network Traffic", "kilobits/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_SYSTEM_NET, + NETDATA_CHART_PRIO_SYSTEM_NET, update_every, RRDSET_TYPE_AREA ); - + rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); } else rrdset_next(st); - + rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes); rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes); rrdset_done(st); } - + // -------------------------------------------------------------------- if (likely(do_packets_net)) { @@ -295,9 +295,9 @@ int do_getifaddrs(int update_every, usec_t dt) { NULL, "Network Packets", "packets/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_SYSTEM_PACKETS, + NETDATA_CHART_PRIO_SYSTEM_PACKETS, update_every, RRDSET_TYPE_LINE ); @@ -340,9 +340,9 @@ int do_getifaddrs(int update_every, usec_t dt) { NULL, "IPv4 Bandwidth", "kilobits/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_SYSTEM_IPV4, + NETDATA_CHART_PRIO_SYSTEM_IPV4, update_every, RRDSET_TYPE_AREA ); @@ -379,9 +379,9 @@ int do_getifaddrs(int update_every, usec_t dt) { NULL, "IPv6 Bandwidth", "kilobits/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_SYSTEM_IPV6, + NETDATA_CHART_PRIO_SYSTEM_IPV6, update_every, RRDSET_TYPE_AREA ); @@ -449,9 +449,9 @@ int 
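The `freebsd_devstat.c` hunk above changes the generation-number skip to `(struct devstat*)((char*)devstat_data + sizeof(long))`. The reason is that pointer arithmetic advances in units of the pointed-to type, and arithmetic on `void *` is only a GCC extension; casting through `char *` makes the `sizeof(long)` offset an explicit, standard byte skip. A self-contained sketch of the idiom follows; the `fake_devstat` layout is a simplified stand-in for the kernel buffer, not the real structure.

```
/* Sketch of byte-offset pointer arithmetic via char*, assuming a buffer
 * laid out as: [generation number (long)][record...]. Simplified layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_devstat { unsigned busy_count; };   /* stand-in record type */

int main(void) {
    size_t size = sizeof(long) + sizeof(struct fake_devstat);
    void *buf = malloc(size);
    if (!buf) return 1;

    long generation = 42;
    struct fake_devstat rec = { .busy_count = 7 };
    memcpy(buf, &generation, sizeof generation);
    memcpy((char *)buf + sizeof(long), &rec, sizeof rec);

    /* the fixed idiom: advance by bytes, then reinterpret */
    struct fake_devstat *dstat =
        (struct fake_devstat *)((char *)buf + sizeof(long));
    printf("busy_count=%u\n", dstat->busy_count);

    free(buf);
    return 0;
}
```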
do_getifaddrs(int update_every, usec_t dt) { "net.net", "Bandwidth", "kilobits/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_FIRST_NET_IFACE, + NETDATA_CHART_PRIO_FIRST_NET_IFACE, update_every, RRDSET_TYPE_AREA ); @@ -478,9 +478,9 @@ int do_getifaddrs(int update_every, usec_t dt) { "net.packets", "Packets", "packets/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_FIRST_NET_PACKETS, + NETDATA_CHART_PRIO_FIRST_NET_PACKETS, update_every, RRDSET_TYPE_LINE ); @@ -517,9 +517,9 @@ int do_getifaddrs(int update_every, usec_t dt) { "net.errors", "Interface Errors", "errors/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_FIRST_NET_ERRORS, + NETDATA_CHART_PRIO_FIRST_NET_ERRORS, update_every, RRDSET_TYPE_LINE ); @@ -551,9 +551,9 @@ int do_getifaddrs(int update_every, usec_t dt) { "net.drops", "Interface Drops", "drops/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_FIRST_NET_DROPS, + NETDATA_CHART_PRIO_FIRST_NET_DROPS, update_every, RRDSET_TYPE_LINE ); @@ -586,9 +586,9 @@ int do_getifaddrs(int update_every, usec_t dt) { "net.events", "Network Interface Events", "events/s", - "freebsd.plugin", + "freebsd.plugin", "getifaddrs", - NETDATA_CHART_PRIO_FIRST_NET_EVENTS, + NETDATA_CHART_PRIO_FIRST_NET_EVENTS, update_every, RRDSET_TYPE_LINE ); diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c index c86f23166..d050c6270 100644 --- a/collectors/freebsd.plugin/freebsd_getmntinfo.c +++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c @@ -226,10 +226,10 @@ int do_getmntinfo(int update_every, usec_t dt) { mntbuf[i].f_mntonname, "disk.space", title, - "GB", - "freebsd.plugin", + "GiB", + "freebsd.plugin", "getmntinfo", - NETDATA_CHART_PRIO_DISKSPACE_SPACE, + NETDATA_CHART_PRIO_DISKSPACE_SPACE, update_every, RRDSET_TYPE_STACKED ); @@ -265,10 +265,10 @@ int do_getmntinfo(int update_every, usec_t dt) { mntbuf[i].f_mntonname, "disk.inodes", title, - "Inodes", - "freebsd.plugin", + "inodes", + "freebsd.plugin", "getmntinfo", - NETDATA_CHART_PRIO_DISKSPACE_INODES, + NETDATA_CHART_PRIO_DISKSPACE_INODES, update_every, RRDSET_TYPE_STACKED ); diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c index c256da8b3..a1e50e204 100644 --- a/collectors/freebsd.plugin/freebsd_ipfw.c +++ b/collectors/freebsd.plugin/freebsd_ipfw.c @@ -197,7 +197,7 @@ int do_ipfw(int update_every, usec_t dt) { NULL, "Packets", "packets/s", - "freebsd.plugin", + "freebsd.plugin", "ipfw", NETDATA_CHART_PRIO_IPFW_PACKETS, update_every, @@ -214,7 +214,7 @@ int do_ipfw(int update_every, usec_t dt) { NULL, "Bytes", "bytes/s", - "freebsd.plugin", + "freebsd.plugin", "ipfw", NETDATA_CHART_PRIO_IPFW_BYTES, update_every, @@ -318,7 +318,7 @@ int do_ipfw(int update_every, usec_t dt) { NULL, "Active rules", "rules", - "freebsd.plugin", + "freebsd.plugin", "ipfw", NETDATA_CHART_PRIO_IPFW_ACTIVE, update_every, @@ -335,7 +335,7 @@ int do_ipfw(int update_every, usec_t dt) { NULL, "Expired rules", "rules", - "freebsd.plugin", + "freebsd.plugin", "ipfw", NETDATA_CHART_PRIO_IPFW_EXPIRED, update_every, diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c index 93dfc320b..02103c6b8 100644 --- a/collectors/freebsd.plugin/freebsd_kstat_zfs.c +++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c @@ -11,6 +11,10 @@ extern struct arcstats arcstats; int do_kstat_zfs_misc_arcstats(int update_every, usec_t 
dt) { (void)dt; + static int show_zero_charts = -1; + if(unlikely(show_zero_charts == -1)) + show_zero_charts = config_get_boolean_ondemand("plugin:freebsd:zfs_arcstats", "show zero charts", CONFIG_BOOLEAN_NO); + unsigned long long l2_size; size_t uint64_t_size = sizeof(uint64_t); static struct mibs { @@ -31,11 +35,11 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { int deleted[5]; int mutex_miss[5]; int evict_skip[5]; - int evict_not_enough[5]; - int evict_l2_cached[5]; - int evict_l2_eligible[5]; - int evict_l2_ineligible[5]; - int evict_l2_skip[5]; + // int evict_not_enough[5]; + // int evict_l2_cached[5]; + // int evict_l2_eligible[5]; + // int evict_l2_ineligible[5]; + // int evict_l2_skip[5]; int hash_elements[5]; int hash_elements_max[5]; int hash_collisions[5]; @@ -46,60 +50,60 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { int c_min[5]; int c_max[5]; int size[5]; - int hdr_size[5]; - int data_size[5]; - int metadata_size[5]; - int other_size[5]; - int anon_size[5]; - int anon_evictable_data[5]; - int anon_evictable_metadata[5]; + // int hdr_size[5]; + // int data_size[5]; + // int metadata_size[5]; + // int other_size[5]; + // int anon_size[5]; + // int anon_evictable_data[5]; + // int anon_evictable_metadata[5]; int mru_size[5]; - int mru_evictable_data[5]; - int mru_evictable_metadata[5]; - int mru_ghost_size[5]; - int mru_ghost_evictable_data[5]; - int mru_ghost_evictable_metadata[5]; + // int mru_evictable_data[5]; + // int mru_evictable_metadata[5]; + // int mru_ghost_size[5]; + // int mru_ghost_evictable_data[5]; + // int mru_ghost_evictable_metadata[5]; int mfu_size[5]; - int mfu_evictable_data[5]; - int mfu_evictable_metadata[5]; - int mfu_ghost_size[5]; - int mfu_ghost_evictable_data[5]; - int mfu_ghost_evictable_metadata[5]; + // int mfu_evictable_data[5]; + // int mfu_evictable_metadata[5]; + // int mfu_ghost_size[5]; + // int mfu_ghost_evictable_data[5]; + // int mfu_ghost_evictable_metadata[5]; int l2_hits[5]; int l2_misses[5]; - int l2_feeds[5]; - int l2_rw_clash[5]; + // int l2_feeds[5]; + // int l2_rw_clash[5]; int l2_read_bytes[5]; int l2_write_bytes[5]; - int l2_writes_sent[5]; - int l2_writes_done[5]; - int l2_writes_error[5]; - int l2_writes_lock_retry[5]; - int l2_evict_lock_retry[5]; - int l2_evict_reading[5]; - int l2_evict_l1cached[5]; - int l2_free_on_write[5]; - int l2_cdata_free_on_write[5]; - int l2_abort_lowmem[5]; - int l2_cksum_bad[5]; - int l2_io_error[5]; + // int l2_writes_sent[5]; + // int l2_writes_done[5]; + // int l2_writes_error[5]; + // int l2_writes_lock_retry[5]; + // int l2_evict_lock_retry[5]; + // int l2_evict_reading[5]; + // int l2_evict_l1cached[5]; + // int l2_free_on_write[5]; + // int l2_cdata_free_on_write[5]; + // int l2_abort_lowmem[5]; + // int l2_cksum_bad[5]; + // int l2_io_error[5]; int l2_size[5]; int l2_asize[5]; - int l2_hdr_size[5]; - int l2_compress_successes[5]; - int l2_compress_zeros[5]; - int l2_compress_failures[5]; + // int l2_hdr_size[5]; + // int l2_compress_successes[5]; + // int l2_compress_zeros[5]; + // int l2_compress_failures[5]; int memory_throttle_count[5]; - int duplicate_buffers[5]; - int duplicate_buffers_size[5]; - int duplicate_reads[5]; - int memory_direct_count[5]; - int memory_indirect_count[5]; - int arc_no_grow[5]; - int arc_tempreserve[5]; - int arc_loaned_bytes[5]; - int arc_prune[5]; - int arc_meta_used[5]; + // int duplicate_buffers[5]; + // int duplicate_buffers_size[5]; + // int duplicate_reads[5]; + // int memory_direct_count[5]; + // int 
memory_indirect_count[5]; + // int arc_no_grow[5]; + // int arc_tempreserve[5]; + // int arc_loaned_bytes[5]; + // int arc_prune[5]; + // int arc_meta_used[5]; int arc_meta_limit[5]; int arc_meta_max[5]; int arc_meta_min[5]; @@ -209,8 +213,8 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free); // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free); - generate_charts_arcstats("freebsd", "zfs", update_every); - generate_charts_arc_summary("freebsd", "zfs", update_every); + generate_charts_arcstats("freebsd", "zfs", show_zero_charts, update_every); + generate_charts_arc_summary("freebsd", "zfs", show_zero_charts, update_every); return 0; } @@ -261,7 +265,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { rrddim_set_by_pointer(st_bytes, rd_bytes, bytes); rrdset_done(st_bytes); - + // -------------------------------------------------------------------- static RRDSET *st_requests = NULL; @@ -293,7 +297,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { rrddim_set_by_pointer(st_requests, rd_failed, failed); rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported); rrdset_done(st_requests); - + } return 0; diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c index da5a351de..3f1b10018 100644 --- a/collectors/freebsd.plugin/freebsd_sysctl.c +++ b/collectors/freebsd.plugin/freebsd_sysctl.c @@ -276,7 +276,7 @@ int do_vm_vmtotal(int update_every, usec_t dt) { "system", NULL, "Committed (Allocated) Memory", - "MB", + "MiB", "freebsd.plugin", "vm.vmtotal", NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED, @@ -580,7 +580,6 @@ int do_hw_intcnt(int update_every, usec_t dt) { (void)dt; static int mib_hw_intrcnt[2] = {0, 0}; size_t intrcnt_size = 0; - unsigned long i; if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) { error("DISABLED: system.intr chart"); @@ -591,7 +590,7 @@ int do_hw_intcnt(int update_every, usec_t dt) { unsigned long nintr = 0; static unsigned long old_nintr = 0; static unsigned long *intrcnt = NULL; - unsigned long long totalintr = 0; + unsigned long i; nintr = intrcnt_size / sizeof(u_long); if (unlikely(nintr != old_nintr)) @@ -602,6 +601,8 @@ int do_hw_intcnt(int update_every, usec_t dt) { error("DISABLED: hw.intrcnt module"); return 1; } else { + unsigned long long totalintr = 0; + for (i = 0; i < nintr; i++) totalintr += intrcnt[i]; @@ -653,7 +654,6 @@ int do_hw_intcnt(int update_every, usec_t dt) { // -------------------------------------------------------------------- static RRDSET *st_interrupts = NULL; - void *p; if (unlikely(!st_interrupts)) st_interrupts = rrdset_create_localhost( @@ -674,6 +674,8 @@ int do_hw_intcnt(int update_every, usec_t dt) { rrdset_next(st_interrupts); for (i = 0; i < nintr; i++) { + void *p; + p = intrnames + i * (MAXCOMLEN + 1); if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) { RRDDIM *rd_interrupts = rrddim_find(st_interrupts, p); @@ -936,7 +938,7 @@ int do_vm_swap_info(int update_every, usec_t dt) { "swap", NULL, "System Swap", - "MB", + "MiB", "freebsd.plugin", "vm.swap_info", NETDATA_CHART_PRIO_SYSTEM_SWAP, @@ -965,11 +967,14 @@ int do_vm_swap_info(int update_every, usec_t dt) { int do_system_ram(int update_every, usec_t dt) { (void)dt; static int mib_active_count[4] = {0, 0, 0, 0}, mib_inactive_count[4] = {0, 0, 0, 0}, mib_wire_count[4] = {0, 0, 0, 0}, - 
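The arcstats hunk above introduces a lazily initialized option: `show_zero_charts` starts at `-1` (meaning "not read yet") and is fetched from the config exactly once, even though the collector function runs on every update interval. Here is a minimal sketch of that pattern; `read_config_boolean()` is a stand-in for netdata's `config_get_boolean_ondemand()`, not the real API.

```
/* Lazy one-shot config read inside a periodically called collector.
 * read_config_boolean() is a hypothetical stand-in for the real lookup. */
#include <stdio.h>

static int read_config_boolean(const char *section, const char *key, int def) {
    (void)section; (void)key;
    return def;                        /* pretend the config has no override */
}

static void do_collect(void) {
    static int show_zero_charts = -1;  /* -1 = unread, 0 = no, 1 = yes */
    if (show_zero_charts == -1)
        show_zero_charts = read_config_boolean("plugin:freebsd:zfs_arcstats",
                                               "show zero charts", 0);
    printf("show_zero_charts=%d\n", show_zero_charts);
}

int main(void) {
    do_collect();   /* lookup happens on the first call */
    do_collect();   /* cached value is reused afterwards */
    return 0;
}
```

The sentinel must be a value the option can never legitimately take, which is why `-1` is used for a boolean rather than `0`.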
mib_cache_count[4] = {0, 0, 0, 0}, mib_laundry_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, - mib_free_count[4] = {0, 0, 0, 0}; + mib_cache_count[4] = {0, 0, 0, 0}, mib_vfs_bufspace[2] = {0, 0}, mib_free_count[4] = {0, 0, 0, 0}; vmmeter_t vmmeter_data; int vfs_bufspace_count; +#if defined(NETDATA_COLLECT_LAUNDRY) + static int mib_laundry_count[4] = {0, 0, 0, 0}; +#endif + if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) || GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) || GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) || @@ -990,7 +995,11 @@ int do_system_ram(int update_every, usec_t dt) { static RRDSET *st = NULL; static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL, - *rd_cache = NULL, *rd_laundry = NULL, *rd_buffers = NULL; + *rd_cache = NULL, *rd_buffers = NULL; + +#if defined(NETDATA_COLLECT_LAUNDRY) + static RRDDIM *rd_laundry = NULL; +#endif if (unlikely(!st)) { st = rrdset_create_localhost( @@ -1000,7 +1009,7 @@ int do_system_ram(int update_every, usec_t dt) { "ram", NULL, "System RAM", - "MB", + "MiB", "freebsd.plugin", "system.ram", NETDATA_CHART_PRIO_SYSTEM_RAM, @@ -1067,7 +1076,7 @@ int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) { "swap", NULL, "Swap I/O", - "kilobytes/s", + "KiB/s", "freebsd.plugin", "vm.stats.vm.v_swappgs", NETDATA_CHART_PRIO_SYSTEM_SWAPIO, @@ -1155,7 +1164,7 @@ int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) { int do_kern_ipc_sem(int update_every, usec_t dt) { (void)dt; - static int mib_semmni[3] = {0, 0, 0}, mib_sema[3] = {0, 0, 0}; + static int mib_semmni[3] = {0, 0, 0}; struct ipc_sem { int semmni; collected_number sets; @@ -1170,6 +1179,7 @@ int do_kern_ipc_sem(int update_every, usec_t dt) { } else { static struct semid_kernel *ipc_sem_data = NULL; static int old_semmni = 0; + static int mib_sema[3] = {0, 0, 0}; if (unlikely(ipc_sem.semmni != old_semmni)) { ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni); @@ -1253,7 +1263,7 @@ int do_kern_ipc_sem(int update_every, usec_t dt) { int do_kern_ipc_shm(int update_every, usec_t dt) { (void)dt; - static int mib_shmmni[3] = {0, 0, 0}, mib_shmsegs[3] = {0, 0, 0}; + static int mib_shmmni[3] = {0, 0, 0}; struct ipc_shm { u_long shmmni; collected_number segs; @@ -1268,6 +1278,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) { } else { static struct shmid_kernel *ipc_shm_data = NULL; static u_long old_shmmni = 0; + static int mib_shmsegs[3] = {0, 0, 0}; if (unlikely(ipc_shm.shmmni != old_shmmni)) { ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni); @@ -1327,7 +1338,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) { "ipc shared memory", NULL, "IPC Shared Memory Segments Size", - "kilobytes", + "KiB", "freebsd.plugin", "kern.ipc.shm", NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE, @@ -1352,7 +1363,7 @@ int do_kern_ipc_shm(int update_every, usec_t dt) { int do_kern_ipc_msq(int update_every, usec_t dt) { (void)dt; - static int mib_msgmni[3] = {0, 0, 0}, mib_msqids[3] = {0, 0, 0}; + static int mib_msgmni[3] = {0, 0, 0}; struct ipc_msq { int msgmni; collected_number queues; @@ -1370,6 +1381,7 @@ int do_kern_ipc_msq(int update_every, usec_t dt) { } else { static struct msqid_kernel *ipc_msq_data = NULL; static int old_msgmni = 0; + static int mib_msqids[3] = {0, 0, 0}; if (unlikely(ipc_msq.msgmni != old_msgmni)) { ipc_msq_data = 
reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni); @@ -1531,12 +1543,6 @@ int do_net_isr(int update_every, usec_t dt) { do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1); } - static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0}; - int common_error = 0; - size_t netisr_workstream_size = 0, netisr_work_size = 0; - unsigned long num_netisr_workstreams = 0, num_netisr_works = 0; - static struct sysctl_netisr_workstream *netisr_workstream = NULL; - static struct sysctl_netisr_work *netisr_work = NULL; static struct netisr_stats { collected_number dispatched; collected_number hybrid_dispatched; @@ -1545,6 +1551,13 @@ int do_net_isr(int update_every, usec_t dt) { } *netisr_stats = NULL; if (likely(do_netisr || do_netisr_per_core)) { + static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0}; + size_t netisr_workstream_size = 0, netisr_work_size = 0; + static struct sysctl_netisr_workstream *netisr_workstream = NULL; + static struct sysctl_netisr_work *netisr_work = NULL; + unsigned long num_netisr_workstreams = 0, num_netisr_works = 0; + int common_error = 0; + if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) { common_error = 1; } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) { @@ -2034,7 +2047,7 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) { rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail); rrdset_done(st); } - + // -------------------------------------------------------------------- if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO && tcpstat.tcps_listendrop)) { @@ -2243,7 +2256,6 @@ int do_net_inet_icmp_stats(int update_every, usec_t dt) { if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) { static int mib[4] = {0, 0, 0, 0}; struct icmpstat icmpstat; - int i; struct icmp_total { u_long msgs_in; u_long msgs_out; @@ -2259,6 +2271,8 @@ int do_net_inet_icmp_stats(int update_every, usec_t dt) { error("DISABLED: net.inet.icmp.stats module"); return 1; } else { + int i; + for (i = 0; i <= ICMP_MAXTYPE; i++) { icmp_total.msgs_in += icmpstat.icps_inhist[i]; icmp_total.msgs_out += icmpstat.icps_outhist[i]; @@ -2668,7 +2682,7 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) { NULL, "IPv6 Fragments Sent", "packets/s", - "freebsd.plugin", + "freebsd.plugin", "net.inet6.ip6.stats", 3010, update_every, diff --git a/collectors/freeipmi.plugin/Makefile.in b/collectors/freeipmi.plugin/Makefile.in deleted file mode 100644 index 54a0035c6..000000000 --- a/collectors/freeipmi.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
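Several of the `freebsd_sysctl.c` hunks above narrow variable scope (moving the `mib_*` caches and loop counters into the branch that actually uses them) and gate the unused "laundry" RAM dimension behind `#if defined(NETDATA_COLLECT_LAUNDRY)`, so default builds carry no dead state. A small sketch of the compile-time gating pattern, under the assumption that the macro is supplied at build time (e.g. `-DNETDATA_COLLECT_LAUNDRY`):

```
/* Compile-time gating of an optional dimension: state that only matters
 * when NETDATA_COLLECT_LAUNDRY is defined lives inside matching #if blocks. */
#include <stdio.h>

static void report_ram(unsigned long active, unsigned long laundry) {
    printf("active=%lu\n", active);
#if defined(NETDATA_COLLECT_LAUNDRY)
    printf("laundry=%lu\n", laundry);  /* built only when the feature is on */
#else
    (void)laundry;                     /* silence unused-parameter warnings */
#endif
}

int main(void) {
    report_ram(1024, 16);
    return 0;
}
```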
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/freeipmi.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/freeipmi.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md index 6d4ad1865..a2beddb5d 100644 --- a/collectors/freeipmi.plugin/README.md +++ b/collectors/freeipmi.plugin/README.md @@ -1,8 +1,10 @@ -netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. +# freeipmi.plugin + +Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. > FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. 
The IPMI specification defines a set of interfaces for platform management and
is implemented by a number of vendors for system management. The features of
IPMI that most users will be interested in are sensor monitoring, system event
monitoring, power control, and serial-over-LAN (SOL).
 
-## compile `freeipmi.plugin`
+## Compile `freeipmi.plugin`
 
 1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based OS) using the package manager of your system.
 
@@ -12,7 +14,7 @@ Keep in mind IPMI requires root access, so the plugin is setuid to root.
 
 If you just installed the required IPMI tools, please run the command `ipmimonitoring` at least once and verify it returns sensors information. This command initialises IPMI configuration, so that the netdata plugin will be able to work.
 
-## netdata use
+## Netdata use
 
 The plugin creates (up to) 8 charts, based on the information collected from IPMI:
 
@@ -101,7 +103,7 @@ You can set these options in `/etc/netdata/netdata.conf` at this section:
 
 Append to `command options = ` the settings you need. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry, so once every 5 seconds is pretty acceptable.
 
-## ignoring specific sensors
+## Ignoring specific sensors
 
 Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
 
@@ -135,7 +137,7 @@ ID | Name | Type | State | Reading | Unit
 ```
 
-## debugging
+## Debugging
 
 You can run the plugin by hand:
 
@@ -178,3 +180,5 @@ If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and se
 [plugins]
     freeipmi = no
 ```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ffreeipmi.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
index 7fc012d38..35b9a0032 100644
--- a/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -35,6 +35,13 @@ void netdata_cleanup_and_exit(int ret) {
     exit(ret);
 }
 
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+    (void)action;
+    (void)action_result;
+    (void)action_data;
+    return;
+}
+
 // callbacks required by popen()
 void signals_block(void) {};
 void signals_unblock(void) {};
@@ -102,11 +109,11 @@ char *sensor_config_file = NULL;
  * - See ipmi_monitoring.h for descriptions of these flags.
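The comment above (it closes just below) enumerates the libipmimonitoring tuning options that the plugin exposes as plain `int` globals. For orientation, a minimal illustrative sketch of how such globals are commonly folded into the flags word that `ipmi_monitoring_sensor_readings_by_record_id()` accepts. The `IPMI_MONITORING_SENSOR_READING_FLAGS_*` names follow freeipmi's `ipmi_monitoring.h` and should be verified against your installed header; the helper itself is not code from this patch:

```c
/* Illustrative helper (not from this patch): map the plugin's int
 * option globals to the sensor-reading flags word libipmimonitoring
 * expects. Verify flag names against ipmi_monitoring.h. */
#include <ipmi_monitoring.h>

extern int reread_sdr_cache, ignore_non_interpretable_sensors,
           bridge_sensors, discrete_reading, entity_sensor_names;

static unsigned int sensor_reading_flags(void) {
    unsigned int flags = 0;
    if(reread_sdr_cache)                 flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
    if(ignore_non_interpretable_sensors) flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
    if(bridge_sensors)                   flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
    if(discrete_reading)                 flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING;
    if(entity_sensor_names)              flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
    /* the remaining options follow the same pattern */
    return flags;
}
```

With the defaults changed in the next hunk, non-interpretable sensors are no longer skipped (`ignore_non_interpretable_sensors` drops to 0) and discrete readings are collected (`discrete_reading` becomes 1).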
*/ int reread_sdr_cache = 0; -int ignore_non_interpretable_sensors = 1; +int ignore_non_interpretable_sensors = 0; int bridge_sensors = 0; int interpret_oem_data = 0; int shared_sensors = 0; -int discrete_reading = 0; +int discrete_reading = 1; int ignore_scanning_disabled = 0; int assume_bmc_owner = 0; int entity_sensor_names = 0; @@ -321,7 +328,7 @@ static void send_chart_to_netdata_for_units(int units) { switch(units) { case IPMI_MONITORING_SENSOR_UNITS_CELSIUS: - printf("CHART ipmi.temperatures_c '' 'System Celcius Temperatures read by IPMI' 'Celcius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n" + printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n" , netdata_priority + 10 , netdata_update_every ); @@ -665,10 +672,13 @@ static void netdata_get_sensor( if(!sn) { // not found, create it - // check if it is excluded - if(excluded_record_ids_check(record_id)) + if(excluded_record_ids_check(record_id)) { + if(debug) fprintf(stderr, "Sensor '%s' is excluded by excluded_record_ids_check()\n", sensor_name); return; + } + + if(debug) fprintf(stderr, "Allocating new sensor data record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type); sn = calloc(1, sizeof(struct sensor)); if(!sn) { @@ -689,6 +699,9 @@ static void netdata_get_sensor( sn->next = sensors_root; sensors_root = sn; } + else { + if(debug) fprintf(stderr, "Reusing sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type); + } switch(sensor_reading_type) { case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL: @@ -710,13 +723,16 @@ static void netdata_get_sensor( break; default: + if(debug) fprintf(stderr, "Unknown reading type - Ignoring sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n", sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type); sn->ignore = 1; break; } // check if it is excluded - if(excluded_status_record_ids_check(record_id)) + if(excluded_status_record_ids_check(record_id)) { + if(debug) fprintf(stderr, "Sensor '%s' is excluded for status check, by excluded_status_record_ids_check()\n", sensor_name); return; + } switch(sensor_state) { case IPMI_MONITORING_STATE_NOMINAL: @@ -963,12 +979,13 @@ _ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config) goto cleanup; } - if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx))) - { - error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s", - ipmi_monitoring_ctx_errormsg (ctx)); - goto cleanup; - } + /* it's ok for this to be NULL, i.e. 
sensor_bitmask == + * IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN + */ + sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx); + + + #endif // NETDATA_COMMENTED if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0) @@ -1075,7 +1092,8 @@ _ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config) else printf (", N/A"); - if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN) + if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN + && sensor_bitmask_strings) { unsigned int i = 0; diff --git a/collectors/idlejitter.plugin/Makefile.in b/collectors/idlejitter.plugin/Makefile.in deleted file mode 100644 index 973a3bef7..000000000 --- a/collectors/idlejitter.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/idlejitter.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = 
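The freeipmi_plugin.c hunks end above. Since several of them touch `printf()`ed `CHART` lines, a brief reminder of the shape of netdata's plugins.d text protocol, which this plugin writes to stdout, may help. The chart and dimension ids below are invented for the example and the priority is arbitrary; this is a sketch, not the plugin's code:

```c
/* Sketch of the plugins.d stdout protocol freeipmi.plugin speaks
 * (ids and priority invented for the example). */
#include <stdio.h>

static void declare_chart_once(int update_every) {
    /* CHART type.id name title units family context charttype priority update_every */
    printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI'"
           " 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n",
           1000, update_every);
    /* DIMENSION id name algorithm multiplier divisor */
    printf("DIMENSION sensor42 'CPU1 Temp' absolute 1 1\n");
}

static void send_reading(long long celsius) {
    printf("BEGIN ipmi.temperatures_c\n");
    printf("SET sensor42 = %lld\n", celsius);
    printf("END\n");
    fflush(stdout);   /* the agent consumes the pipe line by line */
}
```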
$(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = 
@PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/idlejitter.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md index 3c2080536..e8e78085d 100644 --- a/collectors/idlejitter.plugin/README.md +++ b/collectors/idlejitter.plugin/README.md @@ -1,8 +1,8 @@ -## idlejitter.plugin +# idlejitter.plugin It works like this: -A thread is spawn that requests to sleep for 20000 microseconds (20ms). +A thread is spawned that requests to sleep for 20000 microseconds (20ms). When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done at most 50 times per second, to ensure we have a good average. @@ -11,3 +11,5 @@ This number is useful: 1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways). 2. in cloud infrastructure, at can pause the VM or container for a small duration to perform operations at the host. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fidlejitter.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/macos.plugin/Makefile.in b/collectors/macos.plugin/Makefile.in deleted file mode 100644 index d5979211d..000000000 --- a/collectors/macos.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/macos.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) 
>/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ 
-htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/macos.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT:
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
index ddbcc8f9b..3e2554e47 100644
--- a/collectors/macos.plugin/README.md
+++ b/collectors/macos.plugin/README.md
@@ -1,3 +1,5 @@
-# macos
+# macos.plugin
 
 Collects resource usage and performance data on MacOS systems
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fmacos.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
index 5d0ba929e..f253489a5 100644
--- a/collectors/macos.plugin/macos_fw.c
+++ b/collectors/macos.plugin/macos_fw.c
@@ -154,7 +154,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , diskstat.name
                 , "disk.io"
                 , "Disk I/O Bandwidth"
-                , "kilobytes/s"
+                , "KiB/s"
                 , "macos"
                 , "iokit"
                 , 2000
@@ -306,7 +306,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , diskstat.name
                 , "disk.await"
                 , "Average Completed I/O Operation Time"
-                , "ms per operation"
+                , "milliseconds/operation"
                 , "macos"
                 , "iokit"
                 , 2005
@@ -337,7 +337,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , diskstat.name
                 , "disk.avgsz"
                 , "Average Completed I/O Operation Bandwidth"
-                , "kilobytes per operation"
+                , "KiB/operation"
                 , "macos"
                 , "iokit"
                 , 2006
@@ -368,7 +368,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , diskstat.name
                 , "disk.svctm"
                 , "Average Service Time"
-                , "ms per operation"
+                , "milliseconds/operation"
                 , "macos"
                 , "iokit"
                 , 2007
@@ -410,7 +410,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , "disk"
                 , NULL
                 , "Disk I/O"
-                , "kilobytes/s"
+                , "KiB/s"
                 , "macos"
                 , "iokit"
                 , 150
@@ -463,7 +463,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , mntbuf[i].f_mntonname
                 , "disk.space"
                 , title
-                , "GB"
+                , "GiB"
                 , "macos"
                 , "iokit"
                 , 2023
@@ -496,7 +496,7 @@ int do_macos_iokit(int update_every, usec_t dt) {
                 , mntbuf[i].f_mntonname
                 , "disk.inodes"
                 , title
-                , "Inodes"
+                , "inodes"
                 , "macos"
                 , "iokit"
                 , 2024
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
index 1c43d624c..800b2ce56 100644
--- a/collectors/macos.plugin/macos_mach_smi.c
+++ b/collectors/macos.plugin/macos_mach_smi.c
@@ -90,7 +90,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
     }
 
     // --------------------------------------------------------------------
-    
+
     if (likely(do_ram || do_swapio || do_pgfaults)) {
 #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
         count = sizeof(vm_statistics64_data_t);
@@ -118,7 +118,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
                 , "ram"
                 , NULL
                 , "System RAM"
-                , "MB"
+                , "MiB"
                 , "macos"
                 , "mach_smi"
                 , 200
@@ -165,7 +165,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
                 , "swap"
                 , NULL
                 , "Swap I/O"
-                , "kilobytes/s"
+                , "KiB/s"
                 , "macos"
                 , "mach_smi"
                 , 250
@@ -196,7 +196,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
                 , "system"
                 , NULL
                 , "Memory Page Faults"
-                , "page faults/s"
+                , "faults/s"
                 , "macos"
                 , "mach_smi"
                 , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
@@ -233,8 +233,8 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
             rrdset_done(st);
         }
     }
-    } 
-    
+    }
+
     // --------------------------------------------------------------------
 
     return 0;
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
index 6b443c04a..a8af72e69 100644
--- 
a/collectors/macos.plugin/macos_sysctl.c +++ b/collectors/macos.plugin/macos_sysctl.c @@ -279,7 +279,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , "swap" , NULL , "System Swap" - , "MB" + , "MiB" , "macos" , "sysctl" , 201 @@ -965,7 +965,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { } } } - + // -------------------------------------------------------------------- if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) { diff --git a/collectors/nfacct.plugin/Makefile.in b/collectors/nfacct.plugin/Makefile.in deleted file mode 100644 index 2a1d001de..000000000 --- a/collectors/nfacct.plugin/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/nfacct.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - 
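The unit relabeling in the macos hunks above (kilobytes/s to KiB/s, MB to MiB, GB to GiB, ms per operation to milliseconds/operation) aligns the chart labels with the powers-of-1024 scaling these collectors already apply to byte counts. A small illustrative formatter, not netdata code, showing the binary-unit convention the new labels advertise:

```c
/* Illustrative only: format a byte count using binary units
 * (1 KiB = 1024 bytes, not 1000), matching the relabeled charts. */
#include <stdio.h>

static void print_binary_units(unsigned long long bytes) {
    static const char *units[] = { "B", "KiB", "MiB", "GiB", "TiB" };
    double value = (double)bytes;
    int i = 0;
    while(value >= 1024.0 && i < 4) { value /= 1024.0; i++; }
    printf("%.2f %s\n", value, units[i]);
}

int main(void) {
    print_binary_units(1536ULL);              /* prints "1.50 KiB" */
    print_binary_units(3ULL * 1024 * 1024);   /* prints "3.00 MiB" */
    return 0;
}
```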
$(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = 
@SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/nfacct.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md index 814b47915..5f1ee2e7c 100644 --- a/collectors/nfacct.plugin/README.md +++ b/collectors/nfacct.plugin/README.md @@ -8,3 +8,5 @@ We have to move the code to an external plugin to setuid just the plugin not the You can build netdata with it to test it though. Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need). Remember, you have to tell netdata you want it to run as `root` for this plugin to work. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnfacct.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/.keep b/collectors/node.d.plugin/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am index 4de13cf76..3b5a0a518 100644 --- a/collectors/node.d.plugin/Makefile.am +++ b/collectors/node.d.plugin/Makefile.am @@ -23,12 +23,11 @@ dist_noinst_DATA = \ usernodeconfigdir=$(configdir)/node.d dist_usernodeconfig_DATA = \ - $(top_srcdir)/installer/.keep \ + .keep \ $(NULL) nodeconfigdir=$(libconfigdir)/node.d dist_nodeconfig_DATA = \ - $(top_srcdir)/installer/.keep \ $(NULL) dist_node_DATA = \ diff --git a/collectors/node.d.plugin/Makefile.in b/collectors/node.d.plugin/Makefile.in deleted file mode 100644 index 4aec01dea..000000000 --- a/collectors/node.d.plugin/Makefile.in +++ /dev/null @@ -1,805 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. 
- -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc \ - $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc \ - $(srcdir)/sma_webbox/Makefile.inc 
$(srcdir)/snmp/Makefile.inc \ - $(srcdir)/stiebeleltron/Makefile.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \ - $(dist_libconfig_DATA) $(dist_node_DATA) \ - $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \ - $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \ - $(dist_usernodeconfig_DATA) -subdir = collectors/node.d.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" \ - "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" \ - "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" \ - "$(DESTDIR)$(nodemoduleslibberdir)" \ - "$(DESTDIR)$(usernodeconfigdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_libconfig_DATA) $(dist_node_DATA) \ - $(dist_nodeconfig_DATA) $(dist_nodemodules_DATA) \ - $(dist_nodemoduleslibber_DATA) $(dist_noinst_DATA) \ - $(dist_usernodeconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = 
@UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - node.d.plugin \ - $(NULL) - -SUFFIXES = .in -dist_libconfig_DATA = \ - node.d.conf \ - $(NULL) - -dist_plugins_SCRIPTS = \ - node.d.plugin \ - $(NULL) - -# dist_nodeconfig_DATA += fronius/fronius.conf - -# do not install these files, but include them in the distribution -# dist_nodeconfig_DATA += named/named.conf - -# do not install these files, but include them in the distribution -# dist_nodeconfig_DATA += sma_webbox/sma_webbox.conf - -# do not install these files, but include them in the distribution -# dist_nodeconfig_DATA += snmp/snmp.conf - -# do not install these files, but include them in the distribution -# dist_nodeconfig_DATA += stiebeleltron/stiebeleltron.conf - -# do not install these files, but include them in the distribution -dist_noinst_DATA = node.d.plugin.in README.md $(NULL) \ - fronius/README.md fronius/Makefile.inc named/README.md \ - named/Makefile.inc sma_webbox/README.md \ - sma_webbox/Makefile.inc snmp/README.md snmp/Makefile.inc \ - stiebeleltron/README.md stiebeleltron/Makefile.inc -usernodeconfigdir = $(configdir)/node.d -dist_usernodeconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - -nodeconfigdir = $(libconfigdir)/node.d -dist_nodeconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files -dist_node_DATA = $(NULL) fronius/fronius.node.js named/named.node.js \ - sma_webbox/sma_webbox.node.js snmp/snmp.node.js \ - stiebeleltron/stiebeleltron.node.js -nodemodulesdir = $(nodedir)/node_modules -dist_nodemodules_DATA = \ - node_modules/netdata.js \ - node_modules/extend.js \ - 
node_modules/pixl-xml.js \ - node_modules/net-snmp.js \ - node_modules/asn1-ber.js \ - $(NULL) - -nodemoduleslibberdir = $(nodedir)/node_modules/lib/ber -dist_nodemoduleslibber_DATA = \ - node_modules/lib/ber/index.js \ - node_modules/lib/ber/errors.js \ - node_modules/lib/ber/reader.js \ - node_modules/lib/ber/types.js \ - node_modules/lib/ber/writer.js \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/node.d.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc $(srcdir)/fronius/Makefile.inc $(srcdir)/named/Makefile.inc $(srcdir)/sma_webbox/Makefile.inc $(srcdir)/snmp/Makefile.inc $(srcdir)/stiebeleltron/Makefile.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; 
$(am__uninstall_files_from_dir) -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_nodeDATA: $(dist_node_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodedir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodedir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodedir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodedir)" || exit $$?; \ - done - -uninstall-dist_nodeDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_node_DATA)'; test -n "$(nodedir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodedir)'; $(am__uninstall_files_from_dir) -install-dist_nodeconfigDATA: $(dist_nodeconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodeconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodeconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodeconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodeconfigdir)" || exit $$?; \ - done - -uninstall-dist_nodeconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodeconfig_DATA)'; test -n "$(nodeconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodeconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_nodemodulesDATA: $(dist_nodemodules_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodemodulesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodemodulesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemodulesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemodulesdir)" || exit $$?; \ - done - -uninstall-dist_nodemodulesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodemodules_DATA)'; test -n "$(nodemodulesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodemodulesdir)'; $(am__uninstall_files_from_dir) -install-dist_nodemoduleslibberDATA: $(dist_nodemoduleslibber_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_nodemoduleslibber_DATA)'; test -n 
"$(nodemoduleslibberdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(nodemoduleslibberdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(nodemoduleslibberdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nodemoduleslibberdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(nodemoduleslibberdir)" || exit $$?; \ - done - -uninstall-dist_nodemoduleslibberDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_nodemoduleslibber_DATA)'; test -n "$(nodemoduleslibberdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(nodemoduleslibberdir)'; $(am__uninstall_files_from_dir) -install-dist_usernodeconfigDATA: $(dist_usernodeconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(usernodeconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(usernodeconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(usernodeconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(usernodeconfigdir)" || exit $$?; \ - done - -uninstall-dist_usernodeconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_usernodeconfig_DATA)'; test -n "$(usernodeconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(usernodeconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(nodedir)" "$(DESTDIR)$(nodeconfigdir)" "$(DESTDIR)$(nodemodulesdir)" "$(DESTDIR)$(nodemoduleslibberdir)" "$(DESTDIR)$(usernodeconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_libconfigDATA install-dist_nodeDATA \ - install-dist_nodeconfigDATA install-dist_nodemodulesDATA \ - install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \ - install-dist_usernodeconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \ - uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \ - uninstall-dist_nodemoduleslibberDATA \ - uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_usernodeconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_libconfigDATA \ - install-dist_nodeDATA install-dist_nodeconfigDATA \ - install-dist_nodemodulesDATA \ - install-dist_nodemoduleslibberDATA install-dist_pluginsSCRIPTS \ - install-dist_usernodeconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info 
install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_libconfigDATA uninstall-dist_nodeDATA \ - uninstall-dist_nodeconfigDATA uninstall-dist_nodemodulesDATA \ - uninstall-dist_nodemoduleslibberDATA \ - uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_usernodeconfigDATA - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md index af8708c7b..265b1ac56 100644 --- a/collectors/node.d.plugin/README.md +++ b/collectors/node.d.plugin/README.md @@ -230,3 +230,5 @@ The `service` object defines a set of functions to allow you send information to *FIXME: document an operational node.d.plugin data collector - the best example is the [snmp collector](snmp/snmp.node.js)* + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md index dd2846990..72522637c 100644 --- a/collectors/node.d.plugin/fronius/README.md +++ b/collectors/node.d.plugin/fronius/README.md @@ -118,3 +118,5 @@ The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this: } } ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Ffronius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/named/README.md b/collectors/node.d.plugin/named/README.md index 977a5015f..480cbc199 100644 --- a/collectors/node.d.plugin/named/README.md +++ b/collectors/node.d.plugin/named/README.md @@ -340,3 +340,5 @@ Verify it works by running the following command (the collector is written in no curl "http://localhost:8888/json/v1/server" ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fnamed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/node.d.plugin b/collectors/node.d.plugin/node.d.plugin deleted file mode 100644 index 2570220c2..000000000 --- a/collectors/node.d.plugin/node.d.plugin +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env bash -':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" - -// shebang hack from: -// 
http://unix.stackexchange.com/questions/65235/universal-node-js-shebang - -// Initially this is run as a shell script. -// Then, the second line, finds nodejs or node or js in the system path -// and executes it with the shell parameters. - -// netdata -// real-time performance and health monitoring, done right! -// (C) 2017 Costa Tsaousis -// SPDX-License-Identifier: GPL-3.0-or-later - -// -------------------------------------------------------------------------------------------------------------------- - -'use strict'; - -// -------------------------------------------------------------------------------------------------------------------- -// get NETDATA environment variables - -var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname; -var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '/usr/local/etc/netdata'; -var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '/usr/local/lib/netdata/conf.d'; -var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1; -var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d'; - -// make sure the modules are found -process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules'); -process.mainModule.paths.unshift(NODE_D_DIR); - - -// -------------------------------------------------------------------------------------------------------------------- -// load required modules - -var fs = require('fs'); -var url = require('url'); -var util = require('util'); -var http = require('http'); -var path = require('path'); -var extend = require('extend'); -var netdata = require('netdata'); - - -// -------------------------------------------------------------------------------------------------------------------- -// configuration - -function netdata_read_json_config_file(module_filename) { - var f = path.basename(module_filename); - - var ufilename, sfilename; - - var m = f.match('.plugin' + '$'); - if(m !== null) { - ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf'; - sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf'; - } - - m = f.match('.node.js' + '$'); - if(m !== null) { - ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf'; - sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf'; - } - - try { - netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename); - return JSON.parse(fs.readFileSync(ufilename, 'utf8')); - } - catch(e) { - netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.'); - dumpError(e); - } - - try { - netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename); - return JSON.parse(fs.readFileSync(sfilename, 'utf8')); - } - catch(e) { - netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.'); - dumpError(e); - } - - return {}; -} - -// internal defaults -extend(true, netdata.options, { - filename: path.basename(__filename), - - update_every: NETDATA_UPDATE_EVERY, - - paths: { - plugins: NETDATA_PLUGINS_DIR, - config: NETDATA_USER_CONFIG_DIR, - stock_config: NETDATA_STOCK_CONFIG_DIR, - modules: [] - }, - - modules_enable_autodetect: true, - modules_enable_all: true, - modules: {} -}); - -// load configuration file -netdata.options_loaded = netdata_read_json_config_file(__filename); -extend(true, netdata.options, netdata.options_loaded); - -if(!netdata.options.paths.plugins) - 
netdata.options.paths.plugins = NETDATA_PLUGINS_DIR; - -if(!netdata.options.paths.config) - netdata.options.paths.config = NETDATA_USER_CONFIG_DIR; - -if(!netdata.options.paths.stock_config) - netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR; - -// console.error('merged netdata object:'); -// console.error(util.inspect(netdata, {depth: 10})); - - -// apply module paths to node.js process -function applyModulePaths() { - var len = netdata.options.paths.modules.length; - while(len--) - process.mainModule.paths.unshift(netdata.options.paths.modules[len]); -} -applyModulePaths(); - - -// -------------------------------------------------------------------------------------------------------------------- -// tracing - -function dumpError(err) { - if (typeof err === 'object') { - if (err.stack) { - netdata.debug(err.stack); - } - } -} - -// -------------------------------------------------------------------------------------------------------------------- -// get command line arguments -{ - var found_myself = false; - var found_number = false; - var found_modules = false; - process.argv.forEach(function (val, index, array) { - netdata.debug('PARAM: ' + val); - - if(!found_myself) { - if(val === __filename) - found_myself = true; - } - else { - switch(val) { - case 'debug': - netdata.options.DEBUG = true; - netdata.debug('DEBUG enabled'); - break; - - default: - if(found_number === true) { - if(found_modules === false) { - for(var i in netdata.options.modules) - netdata.options.modules[i].enabled = false; - } - - if(typeof netdata.options.modules[val] === 'undefined') - netdata.options.modules[val] = {}; - - netdata.options.modules[val].enabled = true; - netdata.options.modules_enable_all = false; - netdata.debug('enabled module ' + val); - } - else { - try { - var x = parseInt(val); - if(x > 0) { - netdata.options.update_every = x; - if(netdata.options.update_every < NETDATA_UPDATE_EVERY) { - netdata.options.update_every = NETDATA_UPDATE_EVERY; - netdata.debug('Update frequency ' + x + 's is too low'); - } - - found_number = true; - netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds'); - } - else netdata.error('Ignoring parameter: ' + val); - } - catch(e) { - netdata.error('Cannot get value of parameter: ' + val); - dumpError(e); - } - } - break; - } - } - }); -} - -if(netdata.options.update_every < 1) { - netdata.debug('Adjusting update frequency to 1 second'); - netdata.options.update_every = 1; -} - -// -------------------------------------------------------------------------------------------------------------------- -// find modules - -function findModules() { - var found = 0; - - var files = fs.readdirSync(NODE_D_DIR); - var len = files.length; - while(len--) { - var m = files[len].match('.node.js' + '$'); - if(m !== null) { - var n = files[len].substring(0, m.index); - - if(typeof(netdata.options.modules[n]) === 'undefined') - netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all }; - - if(netdata.options.modules[n].enabled === true) { - netdata.options.modules[n].name = n; - netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len]; - netdata.options.modules[n].loaded = false; - - // load the module - try { - netdata.debug('loading module ' + netdata.options.modules[n].filename); - netdata.options.modules[n].module = require(netdata.options.modules[n].filename); - netdata.options.modules[n].module.name = n; - netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + 
netdata.options.modules[n].filename); - } - catch(e) { - netdata.options.modules[n].enabled = false; - netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e); - dumpError(e); - continue; - } - - // load its configuration - var c = { - enable_autodetect: netdata.options.modules_enable_autodetect, - update_every: netdata.options.update_every - }; - - var c2 = netdata_read_json_config_file(files[len]); - extend(true, c, c2); - - // call module auto-detection / configuration - try { - netdata.modules_configuring++; - netdata.debug('Configuring module ' + netdata.options.modules[n].name); - var serv = netdata.configure(netdata.options.modules[n].module, c, function() { - netdata.debug('Configured module ' + netdata.options.modules[n].name); - netdata.modules_configuring--; - }); - - netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.'); - } - catch(e) { - netdata.modules_configuring--; - netdata.options.modules[n].enabled = false; - netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.'); - dumpError(e); - continue; - } - - netdata.options.modules[n].loaded = true; - found++; - } - } - } - - // netdata.debug(netdata.options.modules); - return found; -} - -if(findModules() === 0) { - netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR); - netdata.disableNodePlugin(); - process.exit(1); -} - - -// -------------------------------------------------------------------------------------------------------------------- -// start - -function start_when_configuring_ends() { - if(netdata.modules_configuring > 0) { - netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring); - setTimeout(start_when_configuring_ends, 500); - return; - } - - netdata.modules_configuring = 0; - netdata.start(); -} -start_when_configuring_ends(); - -//netdata.debug('netdata object:') -//netdata.debug(netdata); diff --git a/collectors/node.d.plugin/sma_webbox/README.md b/collectors/node.d.plugin/sma_webbox/README.md index 1512c7008..cff7645df 100644 --- a/collectors/node.d.plugin/sma_webbox/README.md +++ b/collectors/node.d.plugin/sma_webbox/README.md @@ -1,4 +1,6 @@ +# SMA Sunny Webbox + [SMA Sunny Webbox](http://files.sma.de/dl/4253/WEBBOX-DUS131916W.pdf) Example netdata configuration for node.d/sma_webbox.conf @@ -23,3 +25,5 @@ The module supports any number of name servers, like this: ] } ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsma_webbox%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js index b9a168adc..aa60ae816 100644 --- a/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js +++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js @@ -73,10 +73,11 @@ var webbox = { if(found > 0 && service.added !== true) service.commit(); + // Grid Current Power Chart if(d['GriPwr'].value !== null) { - var id = 'smawebbox_' + service.name + '.current'; - var chart = webbox.charts[id]; + const id = 'smawebbox_' + service.name + '.current'; + let chart = webbox.charts[id]; if(typeof chart === 'undefined') { chart = { @@ -111,8 +112,8 @@ var webbox = { } if(d['GriEgyTdy'].value !== null) 
{ - var id = 'smawebbox_' + service.name + '.today'; - var chart = webbox.charts[id]; + const id = 'smawebbox_' + service.name + '.today'; + let chart = webbox.charts[id]; if(typeof chart === 'undefined') { chart = { @@ -147,8 +148,8 @@ } if(d['GriEgyTot'].value !== null) { - var id = 'smawebbox_' + service.name + '.total'; - var chart = webbox.charts[id]; + const id = 'smawebbox_' + service.name + '.total'; + let chart = webbox.charts[id]; if(typeof chart === 'undefined') { chart = { diff --git a/collectors/node.d.plugin/snmp/README.md b/collectors/node.d.plugin/snmp/README.md index a307a3642..832108b94 100644 --- a/collectors/node.d.plugin/snmp/README.md +++ b/collectors/node.d.plugin/snmp/README.md @@ -92,16 +92,14 @@ In this example: `family` sets the name of the submenu of the dashboard each chart will appear under. -If you need to define many charts using incremental OIDs, you can use something like this: - -This is like the previous, but the option `multiply_range` given, will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`. +`multiplier` and `divisor` are passed by the plugin to the Netdata daemon and are applied to the metric to convert it properly to `units`. For incremental counters, with the exception of Counter64-type metrics, `offset` is added to the metric from within the SNMP plugin. This means that the value you will see in debug mode in the `DEBUG: setting current chart to... SET` line for a metric will not have been multiplied or divided, but it will have had the offset added to it. -Each of the 24 new charts will have its id (1-24) appended at: +
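+As a minimal, hedged illustration of how these three options combine (the dimension name, OID and values are hypothetical, modeled on the other examples in this README), a dimension that converts an interface byte counter to kilobits per second could look like this:
+
+```json
+"in": {
+  "oid": "1.3.6.1.2.1.2.2.1.10.1",
+  "algorithm": "incremental",
+  "multiplier": 8,
+  "divisor": 1024,
+  "offset": 0
+}
+```
+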
Caution: Counter64 metrics do not support `offset` (issue #5028). +The SNMP plugin supports Counter64 metrics, with the one limitation that the `offset` parameter must not be defined for them. Because JavaScript cannot represent such large numbers exactly, and because the offset is applied to metrics inside the plugin, any `offset` set on a Counter64 metric is silently ignored. +
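+A minimal sketch of the underlying limitation (plain JavaScript `Number` arithmetic; nothing here is specific to the plugin):
+
+```js
+// 64-bit counter values can exceed Number.MAX_SAFE_INTEGER (2^53 - 1);
+// beyond that point, adding a small offset is lost to floating-point rounding.
+var big = 9007199254740992;    // 2^53, well within Counter64 range
+console.log(big + 1 === big);  // true -- the +1 silently disappears
+```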
+
+If you need to define many charts using incremental OIDs, you can use something like this: -

-1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24` -2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24` -3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24` -3. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order) ```json { @@ -144,6 +142,16 @@ Each of the 24 new charts will have its id (1-24) appended at: } ``` +This is like the previous example, but with the option `multiply_range` given, it will multiply the chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`. + +Each of the 24 new charts will have its id (1-24) appended to: + +1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24` +2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24` +3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24` +4. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order) + + The `options` given for each server, are: - `timeout`, the time to wait for the SNMP device to respond. The default is 5000 ms. @@ -355,3 +363,5 @@ This switch has a very slow SNMP processors. To respond, it needs about 8 second ] } ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fsnmp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/node.d.plugin/snmp/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js index a051d3d3a..6b33ae0d5 100644 --- a/collectors/node.d.plugin/snmp/snmp.node.js +++ b/collectors/node.d.plugin/snmp/snmp.node.js @@ -265,7 +265,7 @@ netdata.processors.snmp = { if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid); - service.error('OID ' + varbinds[i].oid + ' gave error: ' + snmp.varbindError(varbinds[i])); + service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i])); value = null; failed++; } @@ -394,7 +394,7 @@ var snmp = { var d = dim_keys[j]; if (dimensions[d].value !== null) { - if(typeof dimensions[d].offset === 'number') + if(typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number') service.set(d, dimensions[d].value + dimensions[d].offset); else service.set(d, dimensions[d].value); diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md index 002a31571..4aa5a43e8 100644 --- a/collectors/node.d.plugin/stiebeleltron/README.md +++ b/collectors/node.d.plugin/stiebeleltron/README.md @@ -503,3 +503,5 @@ The charts are being generated using the configuration below. 
So if your install ] } ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fnode.d.plugin%2Fstiebeleltron%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/plugins.d/Makefile.in b/collectors/plugins.d/Makefile.in deleted file mode 100644 index b2c112811..000000000 --- a/collectors/plugins.d/Makefile.in +++ /dev/null @@ -1,647 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/plugins.d -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ 
- $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. 
-am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/plugins.d/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/plugins.d/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. 
-# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md index c5981803c..6f5294cd6 100644 --- a/collectors/plugins.d/README.md +++ b/collectors/plugins.d/README.md @@ -1,4 +1,4 @@ -# Netdata External Plugins +# External plugins overview `plugins.d` is the netdata internal plugin that collects metrics from external processes, thus allowing netdata to use **external plugins**. @@ -9,6 +9,7 @@ plugin|language|O/S|description :---:|:---:|:---:|:--- [apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. [charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+. +[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS** [fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points. [freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers. [node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`. @@ -88,7 +89,7 @@ For example, for `apps.plugin` the following section is available: - `command options` allows giving additional command line options to the plugin. -Netdata will provide to the extrenal plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. 
A plugin that is updating values more frequently than this, is just wasting resources.
+Netdata will provide the environment variable `NETDATA_UPDATE_EVERY` to external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
 
 Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default is also 1).
 
@@ -390,8 +391,6 @@ or do not output the line at all.
 
 Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
 
-## Properly Writing Plugins
-
 ## Writing Plugins Properly
 
 There are a few rules for writing plugins properly:
@@ -403,7 +402,7 @@ There are a few rules for writing plugins properly:
    - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
    - Do the absolutely minimum while iterating to collect values repeatedly.
    - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
-   - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation white iterating to collect values.
+   - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
    - Avoid running external commands when possible. If you are writing shell scripts avoid especially pipes (each pipe is another fork, a very expensive operation).
 
 2. The best way to iterate at a constant pace is this pseudo code:
 
@@ -471,3 +470,5 @@ There are a few rules for writing plugins properly:
 
 4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fplugins.d%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
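To make the rules above concrete, here is a minimal sketch of an external plugin's main loop in C, emitting the plain-text `CHART`/`DIMENSION`/`BEGIN`/`SET`/`END` protocol that `plugins.d` consumes. The chart name and the fake random value are made up for illustration, and a production plugin would align each iteration to the clock rather than calling a plain `sleep()`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv) {
    // netdata passes the requested update frequency (in seconds) as the
    // first command line parameter; NETDATA_UPDATE_EVERY is the global minimum
    int update_every = (argc > 1) ? atoi(argv[1]) : 1;
    if (update_every < 1) update_every = 1;

    // initialize everything once: define the chart and its dimension up front
    printf("CHART example.random '' 'A random number' 'value'\n");
    printf("DIMENSION random '' absolute 1 1\n");

    for (;;) {
        long value = random() % 100;    // collect a value (fake data here)

        printf("BEGIN example.random\n");
        printf("SET random = %ld\n", value);
        printf("END\n");
        fflush(stdout);                 // netdata reads our stdout through a pipe

        sleep(update_every);            // keep a constant pace between iterations
    }
    return 0;
}
```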
diff --git a/collectors/proc.plugin/Makefile.in b/collectors/proc.plugin/Makefile.in
deleted file mode 100644
index f6db90c87..000000000
--- a/collectors/proc.plugin/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
-  case $${target_option-} in \
-      ?) ;; \
-      *) echo "am__make_running_with_option: internal error: invalid" \
-              "target option '$${target_option-}' specified" >&2; \
-         exit 1;; \
-  esac; \
-  has_opt=no; \
-  sane_makeflags=$$MAKEFLAGS; \
-  if $(am__is_gnu_make); then \
-    sane_makeflags=$$MFLAGS; \
-  else \
-    case $$MAKEFLAGS in \
-      *\\[\ \ ]*) \
-        bs=\\; \
-        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
-          | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
-    esac; \
-  fi; \
-  skip_next=no; \
-  strip_trailopt () \
-  { \
-    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
-  }; \
-  for flg in $$sane_makeflags; do \
-    test $$skip_next = yes && { skip_next=no; continue; }; \
-    case $$flg in \
-      *=*|--*) continue;; \
-        -*I) strip_trailopt 'I'; skip_next=yes;; \
-      -*I?*) strip_trailopt 'I';; \
-        -*O) strip_trailopt 'O'; skip_next=yes;; \
-      -*O?*) strip_trailopt 'O';; \
-        -*l) strip_trailopt 'l'; skip_next=yes;; \
-      -*l?*) strip_trailopt 'l';; \
-      -[dEDm]) skip_next=yes;; \
-      -[JT]) skip_next=yes;; \
-    esac; \
-    case $$flg in \
-      *$$target_option*) has_opt=yes; break;; \
-    esac; \
-  done; \
-  test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = collectors/proc.plugin
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
-	$(dist_noinst_DATA)
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
-	$(top_srcdir)/build/m4/ax_c__generic.m4 \
-	$(top_srcdir)/build/m4/ax_c_lto.m4 \
-	$(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
-	$(top_srcdir)/build/m4/ax_c_mallopt.m4 \
-	$(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
-	$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
-	$(top_srcdir)/build/m4/ax_pthread.m4 \
-	$(top_srcdir)/build/m4/jemalloc.m4 \
-	$(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-	$(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo "  GEN     " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
-  case $$AM_UPDATE_INFO_DIR in \
-    n|no|NO) false;; \
-    *) (install-info --version) >/dev/null 2>&1;; \
-  esac
-DATA = $(dist_noinst_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/proc.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT:
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
old mode 100755
new mode 100644
index 123065655..de62aeca7
--- a/collectors/proc.plugin/README.md
+++ b/collectors/proc.plugin/README.md
@@ -2,6 +2,7 @@
 - `/proc/net/dev` (all network interfaces for all their values)
 - `/proc/diskstats` (all disks for all their values)
+ - `/proc/mdstat` (status of RAID arrays)
 - `/proc/net/snmp` (total IPv4, TCP and UDP usage)
 - `/proc/net/snmp6` (total IPv6 usage)
 - `/proc/net/netstat` (more IPv4 usage)
@@ -18,6 +19,7 @@
 - `/proc/softirqs` (total and per core software interrupts)
 - `/proc/loadavg` (system load and total processes running)
 - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+ - `/sys/class/power_supply` (power supply properties)
 - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
 - `netdata` (internal netdata resources utilization)
@@ -117,7 +119,7 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi
 # path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
 # path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue /hw_sector_size
-
+
 ```
 
 For each virtual disk, physical disk and partition you will have a section like this:
 
@@ -160,13 +162,52 @@ But sometimes you need disable performance metrics for all devices with the same
 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
 ```
-All zram devices starts with `251` number and all loop devices starts with `7`.
+All zram devices start with major number `251` and all loop devices start with major number `7`. So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
 
 ```
 [plugin:proc:/proc/diskstats]
  performance metrics for disks with major 7 = no
 ```
+## Monitoring RAID arrays
+
+### Monitored RAID array metrics
+
+1. **Health**: number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array would ideally have)
+ * inuse (number of devices currently in use)
+
+3. **Mismatch count**
+ * unsynchronized blocks
+
+4. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+5. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+6. **Nonredundant array availability**
+
+#### configuration
+
+```
+[plugin:proc:/proc/mdstat]
+ # faulty devices = yes
+ # nonredundant arrays availability = yes
+ # mismatch count = auto
+ # disk stats = yes
+ # operation status = yes
+ # make charts obsolete = yes
+ # filename to monitor = /proc/mdstat
+ # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt
+```
+
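For orientation, the status lines this module parses look like the following `/proc/mdstat` excerpt (the array name, device list, and numbers are illustrative, not taken from the patch):

```
Personalities : [raid1]
md0 : active raid1 sdb1[1] sda1[0]
      104790016 blocks super 1.2 [2/2] [UU]
      [=>...................]  resync =  9.4% (9907200/104790016) finish=71.1min speed=12093K/sec

unused devices: <none>
```

The `[2/2] [UU]` fields supply the total and in-use disk counts, and the `resync = 9.4% ... finish=71.1min speed=12093K/sec` line feeds the current-status, finish-time, and speed charts described above.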
 ## Monitoring CPUs
 
 The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal throttling, frequency, and idle states. It gathers this information from multiple files.
 
@@ -219,7 +260,7 @@ SYNPROXY is a netfilter module, in the Linux kernel (since version 3.12). It is
 
 The net effect of this, is that the real servers will not notice any change during the attack. The valid TCP connections will pass through and served, while the attack will be stopped at the firewall.
 
-To use SYNPROXY on your firewall, please follow our setup guides:
+Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
 
 - **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
 - **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
@@ -239,4 +280,67 @@ Example image:
 
 ![ddos](https://cloud.githubusercontent.com/assets/2662304/14398891/6016e3fc-fdf0-11e5-942b-55de6a52cb66.gif)
 
-See Linux Anti-DDoS in action at: **[netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+See Linux Anti-DDoS in action at: **[netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
+
+## Linux power supply
+
+This module monitors various metrics reported by power supply drivers on Linux. This allows tracking and alerting on things like remaining battery capacity.
+
+Depending on the underlying driver, it may provide the following charts and metrics:
+
+1. Capacity: the power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: the charge for the power supply, expressed as ampere-hours.
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: the energy for the power supply, expressed as watt-hours.
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: the voltage for the power supply, expressed as volts.
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+#### configuration
+
+```
+[plugin:proc:/sys/class/power_supply]
+ # battery capacity = yes
+ # battery charge = no
+ # battery energy = no
+ # power supply voltage = no
+ # keep files open = auto
+ # directory to monitor = /sys/class/power_supply
+```
+
+#### notes
+
+* Most drivers provide at least the first chart. Battery-powered, ACPI-compliant systems (like most laptops) provide all but the third, but do not provide all of the metrics for each chart.
+
+* Current, energy, and voltages are reported with a _very_ high precision by the power\_supply framework. Usually, this is far higher than the actual hardware supports reporting, so expect values in these charts to jump rather than scale smoothly.
+
+* If a `max` or `full` attribute is defined by the driver but not a corresponding `min` or `empty` attribute, then Netdata will still provide the corresponding `min` or `empty` dimension, which will then always read as zero. This way, alerts which match on these will still work.
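The sysfs attributes this module reads are plain one-value-per-line text files, so it is easy to check what a driver exposes before enabling the charts. Below is a minimal standalone C sketch of such a check; it uses plain stdio rather than Netdata's internal helpers, and the `BAT0` device name is only an assumption for illustration (list `/sys/class/power_supply/` to see the devices on a given machine):

```c
#include <stdio.h>

/* Read one numeric power_supply attribute. Each sysfs file holds a
 * single value: capacity in percent, charge_now in microampere-hours,
 * voltage_now in microvolts - hence the very high precision noted above. */
static long long read_attr(const char *path) {
    long long v;
    FILE *fp = fopen(path, "r");
    if (!fp) return -1;                     /* attribute not provided by this driver */
    if (fscanf(fp, "%lld", &v) != 1) v = -1;
    fclose(fp);
    return v;
}

int main(void) {
    printf("capacity    = %lld %%\n", read_attr("/sys/class/power_supply/BAT0/capacity"));
    printf("charge_now  = %lld uAh\n", read_attr("/sys/class/power_supply/BAT0/charge_now"));
    printf("voltage_now = %lld uV\n", read_attr("/sys/class/power_supply/BAT0/voltage_now"));
    return 0;
}
```

The `keep files open = auto` option above exists because re-opening these files on every collection iteration has a cost; the sketch ignores that and simply opens, reads, and closes each time.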
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fproc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c index 0c3244d61..343acfa37 100644 --- a/collectors/proc.plugin/plugin_proc.c +++ b/collectors/proc.plugin/plugin_proc.c @@ -49,6 +49,7 @@ static struct proc_module { // disk metrics { .name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats }, + { .name = "/proc/mdstat", .dim = "mdstat", .func = do_proc_mdstat }, // NFS metrics { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd }, @@ -63,6 +64,9 @@ static struct proc_module { // IPC metrics { .name = "ipc", .dim = "ipc", .func = do_ipc }, + // linux power supply metrics + { .name = "/sys/class/power_supply", .dim = "power_supply", .func = do_sys_class_power_supply }, + // the terminator of this array { .name = NULL, .dim = NULL, .func = NULL } }; diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h index bfefe1ad4..0c2afe779 100644 --- a/collectors/proc.plugin/plugin_proc.h +++ b/collectors/proc.plugin/plugin_proc.h @@ -26,6 +26,7 @@ extern void *proc_main(void *ptr); extern int do_proc_net_dev(int update_every, usec_t dt); extern int do_proc_diskstats(int update_every, usec_t dt); +extern int do_proc_mdstat(int update_every, usec_t dt); extern int do_proc_net_snmp(int update_every, usec_t dt); extern int do_proc_net_snmp6(int update_every, usec_t dt); extern int do_proc_net_netstat(int update_every, usec_t dt); @@ -52,6 +53,7 @@ extern int do_proc_net_sockstat(int update_every, usec_t dt); extern int do_proc_net_sockstat6(int update_every, usec_t dt); extern int do_proc_net_sctp_snmp(int update_every, usec_t dt); extern int do_ipc(int update_every, usec_t dt); +extern int do_sys_class_power_supply(int update_every, usec_t dt); extern int get_numa_node_count(void); // metrics that need to be shared among data collectors diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c index 387b395a3..51fe7f461 100644 --- a/collectors/proc.plugin/proc_diskstats.c +++ b/collectors/proc.plugin/proc_diskstats.c @@ -798,7 +798,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks); - + char buffer[FILENAME_MAX + 1]; snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s"); @@ -960,7 +960,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.io" , "Disk I/O Bandwidth" - , "kilobytes/s" + , "KiB/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_DISK_IO @@ -1055,7 +1055,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.backlog" , "Disk Backlog" - , "backlog (ms)" + , "milliseconds" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_DISK_BACKLOG @@ -1186,7 +1186,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.await" , "Average Completed I/O Operation Time" - , "ms per operation" + 
, "milliseconds/operation" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_DISK_AWAIT @@ -1217,7 +1217,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.avgsz" , "Average Completed I/O Operation Bandwidth" - , "kilobytes per operation" + , "KiB/operation" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_DISK_AVGSZ @@ -1248,7 +1248,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.svctm" , "Average Service Time" - , "ms per operation" + , "milliseconds/operation" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_DISK_SVCTM @@ -1385,7 +1385,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.bcache_rates" , "BCache Rates" - , "KB/s" + , "KiB/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_BCACHE_RATES @@ -1412,7 +1412,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.bcache_size" , "BCache Cache Sizes" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_BCACHE_SIZE @@ -1437,7 +1437,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , family , "disk.bcache_usage" , "BCache Cache Usage" - , "percent" + , "percentage" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_BCACHE_USAGE @@ -1563,7 +1563,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { , "disk" , NULL , "Disk I/O" - , "kilobytes/s" + , "KiB/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_DISKSTATS_NAME , NETDATA_CHART_PRIO_SYSTEM_IO diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c new file mode 100644 index 000000000..d0925ec32 --- /dev/null +++ b/collectors/proc.plugin/proc_mdstat.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat" + +struct raid { + int redundant; + char *name; + uint32_t hash; + + RRDDIM *rd_health; + unsigned long long failed_disks; + + RRDSET *st_disks; + RRDDIM *rd_total; + RRDDIM *rd_inuse; + unsigned long long total_disks; + unsigned long long inuse_disks; + + RRDSET *st_operation; + RRDDIM *rd_check; + RRDDIM *rd_resync; + RRDDIM *rd_recovery; + RRDDIM *rd_reshape; + unsigned long long check; + unsigned long long resync; + unsigned long long recovery; + unsigned long long reshape; + + RRDSET *st_finish; + RRDDIM *rd_finish_in; + unsigned long long finish_in; + + RRDSET *st_speed; + RRDDIM *rd_speed; + unsigned long long speed; + + char *mismatch_cnt_filename; + RRDSET *st_mismatch_cnt; + RRDDIM *rd_mismatch_cnt; + unsigned long long mismatch_cnt; + + RRDSET *st_nonredundant; + RRDDIM *rd_nonredundant; +}; + +struct old_raid { + int redundant; + char *name; + uint32_t hash; + int found; +}; + +static inline char *remove_trailing_chars(char *s, char c) { + while(*s) { + if(unlikely(*s == c)) { + *s = '\0'; + } + s++; + } + return s; +} + +static inline void make_chart_obsolete(char *name, const char *id_modifier) { + char id[50 + 1]; + RRDSET *st = NULL; + + if(likely(name && id_modifier)) { + snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier); + st = rrdset_find_byname_localhost(id); + if(likely(st)) rrdset_is_obsolete(st); + } +} + +int do_proc_mdstat(int update_every, usec_t dt) { + (void)dt; + static procfile *ff = NULL; + static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1, do_mismatch_config = -1; + static int make_charts_obsolete = 
-1; + static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL; + static struct raid *raids = NULL; + static size_t raids_allocated = 0; + size_t raids_num = 0, raid_idx = 0, redundant_num = 0; + static struct old_raid *old_raids = NULL; + static size_t old_raids_allocated = 0; + size_t old_raid_idx = 0; + + if(unlikely(do_health == -1)){ + do_health = config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES); + do_nonredundant = config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES); + do_mismatch_config = config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO); + do_disks = config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES); + do_operations = config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES); + + make_charts_obsolete = config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES); + + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat"); + mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt"); + mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename); + } + + if(unlikely(!ff)) { + ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 0; // we return 0, so that we will retry opening it next time + + size_t lines = procfile_lines(ff); + size_t words = 0; + + if(unlikely(lines < 2)) { + error("Cannot read /proc/mdstat. 
Expected 2 or more lines, read %zu.", lines); + return 1; + } + + // find how many raids are there + size_t l; + raids_num = 0; + for(l = 1; l < lines - 2 ; l++) { + if(unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active + raids_num++; + } + + if(unlikely(!raids_num && !old_raids_allocated)) return 0; // we return 0, so that we will retry searching for raids next time + + // allocate the memory we need; + if(unlikely(raids_num != raids_allocated)) { + for(raid_idx = 0; raid_idx < raids_allocated; raid_idx++) { + struct raid *raid = &raids[raid_idx]; + freez(raid->name); + freez(raid->mismatch_cnt_filename); + } + if(raids_num) { + raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid)); + memset(raids, 0, raids_num * sizeof(struct raid)); + } + else { + freez(raids); + raids = NULL; + } + raids_allocated = raids_num; + } + + // loop through all lines except the first and the last ones + for(l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) { + struct raid *raid = &raids[raid_idx]; + raid->redundant = 0; + + words = procfile_linewords(ff, l); + if(unlikely(words < 2)) continue; + + if(unlikely(procfile_lineword(ff, l, 1)[0] != 'a')) continue; + if(unlikely(!raid->name)) { + raid->name = strdupz(procfile_lineword(ff, l, 0)); + raid->hash = simple_hash(raid->name); + } + else if(unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) { + freez(raid->name); + freez(raid->mismatch_cnt_filename); + memset(raid, 0, sizeof(struct raid)); + raid->name = strdupz(procfile_lineword(ff, l, 0)); + raid->hash = simple_hash(raid->name); + } + if(unlikely(!raid->name || !raid->name[0])) continue; + raid_idx++; + + // check if raid has disk status + l++; + words = procfile_linewords(ff, l); + if(words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[') continue; + + // split inuse and total number of disks + if(likely(do_health || do_disks)) { + char *s = NULL, *str_total = NULL, *str_inuse = NULL; + + s = procfile_lineword(ff, l, words - 2); + if(unlikely(s[0] != '[')) { + error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket."); + continue; + } + str_total = ++s; + while(*s) { + if(unlikely(*s == '/')) { + *s = '\0'; + str_inuse = s + 1; + } + else if(unlikely(*s == ']')) { + *s = '\0'; + break; + } + s++; + } + if(unlikely(str_total[0] == '\0' || str_inuse[0] == '\0')) { + error("Cannot read /proc/mdstat raid health status. Unexpected format."); + continue; + } + + raid->inuse_disks = str2ull(str_inuse); + raid->total_disks = str2ull(str_total); + raid->failed_disks = raid->total_disks - raid->inuse_disks; + } + + raid->redundant = 1; + redundant_num++; + l++; + + // check if any operation is performed on the raid + if(likely(do_operations)) { + char *s = NULL; + + raid->check = 0; + raid->resync = 0; + raid->recovery = 0; + raid->reshape = 0; + raid->finish_in = 0; + raid->speed = 0; + + words = procfile_linewords(ff, l); + if(likely(words < 2)) continue; + if(unlikely(procfile_lineword(ff, l, 0)[0] != '[')) continue; + if(unlikely(words < 7)) { + error("Cannot read /proc/mdstat line. 
Expected 7 params, read %zu.", words); + continue; + } + + char *word; + word = procfile_lineword(ff, l, 3); + remove_trailing_chars(word, '%'); + + unsigned long long percentage = (unsigned long long)(str2ld(word, NULL) * 100); + // possible operations: check, resync, recovery, reshape + // 4-th character is unique for each operation so it is checked + switch(procfile_lineword(ff, l, 1)[3]) { + case 'c': // check + raid->check = percentage; + break; + case 'y': // resync + raid->resync = percentage; + break; + case 'o': // recovery + raid->recovery = percentage; + break; + case 'h': // reshape + raid->reshape = percentage; + break; + } + + word = procfile_lineword(ff, l, 5); + s = remove_trailing_chars(word, 'm'); // remove trailing "min" + + word += 7; // skip leading "finish=" + + if(likely(s > word)) + raid->finish_in = (unsigned long long)(str2ld(word, NULL) * 60); + + word = procfile_lineword(ff, l, 6); + s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec" + + word += 6; // skip leading "speed=" + + if(likely(s > word)) + raid->speed = str2ull(word); + } + } + + // read mismatch_cnt files + if(do_mismatch == -1) { + if(do_mismatch_config == CONFIG_BOOLEAN_AUTO) { + if(raids_num > 50) + do_mismatch = CONFIG_BOOLEAN_NO; + else + do_mismatch = CONFIG_BOOLEAN_YES; + } + else + do_mismatch = do_mismatch_config; + } + + if(likely(do_mismatch)) { + for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) { + char filename[FILENAME_MAX + 1]; + struct raid *raid = &raids[raid_idx]; + + if(likely(raid->redundant)) { + if(unlikely(!raid->mismatch_cnt_filename)) { + snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name); + raid->mismatch_cnt_filename = strdupz(filename); + } + if(unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) { + error("Cannot read file '%s'", raid->mismatch_cnt_filename); + do_mismatch = CONFIG_BOOLEAN_NO; + error("Monitoring for mismatch count has been disabled"); + break; + } + } + } + } + + // check for disappeared raids + for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) { + struct old_raid *old_raid = &old_raids[old_raid_idx]; + int found = 0; + + for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) { + struct raid *raid = &raids[raid_idx]; + + if(unlikely(raid->hash == old_raid->hash + && !strcmp(raid->name, old_raid->name) + && raid->redundant == old_raid->redundant)) found = 1; + } + + old_raid->found = found; + } + + int raid_disappeared = 0; + for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) { + struct old_raid *old_raid = &old_raids[old_raid_idx]; + + if(unlikely(!old_raid->found)) { + if(likely(make_charts_obsolete)) { + make_chart_obsolete(old_raid->name, "disks"); + make_chart_obsolete(old_raid->name, "mismatch"); + make_chart_obsolete(old_raid->name, "operation"); + make_chart_obsolete(old_raid->name, "finish"); + make_chart_obsolete(old_raid->name, "speed"); + make_chart_obsolete(old_raid->name, "availability"); + } + raid_disappeared = 1; + } + } + + // allocate memory for nonredundant arrays + if(unlikely(raid_disappeared || old_raids_allocated != raids_num)) { + for(old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) { + freez(old_raids[old_raid_idx].name); + } + if(likely(raids_num)) { + old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num); + memset(old_raids, 0, sizeof(struct old_raid) * raids_num); + } + else { + freez(old_raids); + old_raids = NULL; + } + old_raids_allocated = raids_num; + for(old_raid_idx = 0; 
old_raid_idx < old_raids_allocated; old_raid_idx++) { + struct old_raid *old_raid = &old_raids[old_raid_idx]; + struct raid *raid = &raids[old_raid_idx]; + + old_raid->name = strdupz(raid->name); + old_raid->hash = raid->hash; + old_raid->redundant = raid->redundant; + } + } + + // -------------------------------------------------------------------- + + if(likely(do_health && redundant_num)) { + static RRDSET *st_mdstat_health = NULL; + if(unlikely(!st_mdstat_health)) { + st_mdstat_health = rrdset_create_localhost( + "mdstat" + , "mdstat_health" + , NULL + , "health" + , "md.health" + , "Faulty Devices In MD" + , "failed disks" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_HEALTH + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(st_mdstat_health); + } + else + rrdset_next(st_mdstat_health); + + if(!redundant_num) { + if(likely(make_charts_obsolete)) make_chart_obsolete("mdstat", "health"); + } + else { + for(raid_idx = 0; raid_idx < raids_num; raid_idx++) { + struct raid *raid = &raids[raid_idx]; + + if(likely(raid->redundant)) { + if(unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find(st_mdstat_health, raid->name)))) + raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks); + } + } + + rrdset_done(st_mdstat_health); + } + } + + // -------------------------------------------------------------------- + + for(raid_idx = 0; raid_idx < raids_num ; raid_idx++) { + struct raid *raid = &raids[raid_idx]; + char id[50 + 1]; + char family[50 + 1]; + + if(likely(raid->redundant)) { + if(likely(do_disks)) { + snprintfz(id, 50, "%s_disks", raid->name); + + if(unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_byname_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_disks = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.disks" + , "Disks Stats" + , "disks" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10 + , update_every + , RRDSET_TYPE_STACKED + ); + + rrdset_isnot_obsolete(raid->st_disks); + } + else + rrdset_next(raid->st_disks); + + if(unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find(raid->st_disks, "inuse")))) + raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + if(unlikely(!raid->rd_total && !(raid->rd_total = rrddim_find(raid->st_disks, "total")))) + raid->rd_total = rrddim_add(raid->st_disks, "total", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks); + rrddim_set_by_pointer(raid->st_disks, raid->rd_total, raid->total_disks); + + rrdset_done(raid->st_disks); + } + + // -------------------------------------------------------------------- + + if(likely(do_mismatch)) { + snprintfz(id, 50, "%s_mismatch", raid->name); + + if(unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_byname_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_mismatch_cnt = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.mismatch_cnt" + , "Mismatch Count" + , "unsynchronized blocks" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(raid->st_mismatch_cnt); + } + else + rrdset_next(raid->st_mismatch_cnt); + + 
if(unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find(raid->st_mismatch_cnt, "count")))) + raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt); + + rrdset_done(raid->st_mismatch_cnt); + } + + // -------------------------------------------------------------------- + + if(likely(do_operations)) { + snprintfz(id, 50, "%s_operation", raid->name); + + if(unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_byname_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_operation = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.status" + , "Current Status" + , "percent" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(raid->st_operation); + } + else + rrdset_next(raid->st_operation); + + if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find(raid->st_operation, "check")))) + raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find(raid->st_operation, "resync")))) + raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find(raid->st_operation, "recovery")))) + raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find(raid->st_operation, "reshape")))) + raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check); + rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync); + rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery); + rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape); + + rrdset_done(raid->st_operation); + + // -------------------------------------------------------------------- + + snprintfz(id, 50, "%s_finish", raid->name); + + if(unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_byname_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_finish = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.rate" + , "Approximate Time Unit Finish" + , "seconds" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(raid->st_finish); + } + else + rrdset_next(raid->st_finish); + + if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find(raid->st_finish, "finish_in")))) + raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in); + + rrdset_done(raid->st_finish); + + // -------------------------------------------------------------------- + + snprintfz(id, 50, "%s_speed", raid->name); + + if(unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_byname_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_speed = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.rate" + , "Operation Speed" + , 
"KiB/s" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(raid->st_speed); + } + else + rrdset_next(raid->st_speed); + + if(unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find(raid->st_speed, "speed")))) + raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed); + + rrdset_done(raid->st_speed); + } + } + else { + + // -------------------------------------------------------------------- + + if(likely(do_nonredundant)) { + snprintfz(id, 50, "%s_availability", raid->name); + + if(unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_localhost(id)))) { + snprintfz(family, 50, "%s", raid->name); + + raid->st_nonredundant = rrdset_create_localhost( + "mdstat" + , id + , NULL + , family + , "md.nonredundant" + , "Nonredundant Array Availability" + , "boolean" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_MDSTAT_NAME + , NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_isnot_obsolete(raid->st_nonredundant); + } + else + rrdset_next(raid->st_nonredundant); + + if(unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find(raid->st_nonredundant, "available")))) + raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + + rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1); + + rrdset_done(raid->st_nonredundant); + } + } + } + + return 0; +} diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c index f77159ebd..ae399c440 100644 --- a/collectors/proc.plugin/proc_meminfo.c +++ b/collectors/proc.plugin/proc_meminfo.c @@ -146,7 +146,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { // -------------------------------------------------------------------- // http://stackoverflow.com/questions/3019748/how-to-reliably-measure-available-memory-in-linux - unsigned long long MemCached = Cached + Slab; + unsigned long long MemCached = Cached + SReclaimable; unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers; if(do_ram) { @@ -162,7 +162,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "ram" , NULL , "System RAM" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_SYSTEM_RAM @@ -197,7 +197,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "system" , NULL , "Available RAM for applications" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE @@ -233,7 +233,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "swap" , NULL , "System Swap" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_SYSTEM_SWAP @@ -270,7 +270,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "ecc" , NULL , "Corrupted Memory, detected by ECC" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_HW @@ -303,7 +303,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "system" , NULL , "Committed (Allocated) Memory" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED @@ -336,7 +336,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "kernel" , NULL , "Writeback Memory" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , 
PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_KERNEL @@ -376,7 +376,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "kernel" , NULL , "Memory Used by Kernel" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_KERNEL + 1 @@ -415,7 +415,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "slab" , NULL , "Reclaimable Kernel Memory" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_SLAB @@ -452,7 +452,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "hugepages" , NULL , "Dedicated HugePages Memory" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1 @@ -493,7 +493,7 @@ int do_proc_meminfo(int update_every, usec_t dt) { , "hugepages" , NULL , "Transparent HugePages Memory" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_MEMINFO_NAME , NETDATA_CHART_PRIO_MEM_HUGEPAGES diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c index 20b87e9dd..29ef7a394 100644 --- a/collectors/proc.plugin/proc_net_rpc_nfsd.c +++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c @@ -657,7 +657,7 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { , "threads" , NULL , "NFS Server Threads Full Count" - , "ops/s" + , "events" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NFSD_NAME , NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c index 0c3b6e196..ff9cc5230 100644 --- a/collectors/proc.plugin/proc_net_sockstat.c +++ b/collectors/proc.plugin/proc_net_sockstat.c @@ -305,7 +305,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "tcp" , NULL , "IPv4 TCP Sockets Memory" - , "KB" + , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME , NETDATA_CHART_PRIO_IPV4_TCP_MEM @@ -369,7 +369,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "udp" , NULL , "IPv4 UDP Sockets Memory" - , "KB" + , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME , NETDATA_CHART_PRIO_IPV4_UDP_MEM @@ -497,7 +497,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { , "fragments" , NULL , "IPv4 FRAG Sockets Memory" - , "KB" + , "KiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c index f0c1f47c1..312ded5ba 100644 --- a/collectors/proc.plugin/proc_net_stat_synproxy.c +++ b/collectors/proc.plugin/proc_net_stat_synproxy.c @@ -101,7 +101,7 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { , RRD_TYPE_NET_STAT_SYNPROXY , NULL , "SYNPROXY SYN Packets received" - , "SYN/s" + , "packets/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_SYNPROXY_NAME , NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c index a96b236cb..c6557289d 100644 --- a/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -10,7 +10,9 @@ extern struct arcstats arcstats; int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { (void)dt; + static int show_zero_charts = 0, do_zfs_stats = 0; static procfile *ff = NULL; + static char *dirname = NULL; static ARL_BASE *arl_base = NULL; arcstats.l2exist = -1; @@ -117,8 +119,45 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { ff = 
procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); if(unlikely(!ff)) return 1; + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/spl/kstat/zfs"); + dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename); + + show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO); + if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES)) + do_zfs_stats = 1; + } + + // check if any pools exist + if(likely(!do_zfs_stats)) { + DIR *dir = opendir(dirname); + if(unlikely(!dir)) { + error("Cannot read directory '%s'", dirname); + return 1; + } + + struct dirent *de = NULL; + while(likely(de = readdir(dir))) { + if(likely(de->d_type == DT_DIR + && ( + (de->d_name[0] == '.' && de->d_name[1] == '\0') + || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') + ))) + continue; + + if(unlikely(de->d_type == DT_LNK || de->d_type == DT_DIR)) { + do_zfs_stats = 1; + break; + } + } + + closedir(dir); } + // do not show ZFS filesystem metrics if there haven't been any pools in the system yet + if(unlikely(!do_zfs_stats)) + return 0; + ff = procfile_readall(ff); if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time @@ -148,8 +187,8 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { if(unlikely(arcstats.l2exist == -1)) arcstats.l2exist = 0; - generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every); - generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, update_every); + generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every); + generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every); return 0; } diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c old mode 100755 new mode 100644 index 931b415a5..f345a39d6 --- a/collectors/proc.plugin/proc_stat.c +++ b/collectors/proc.plugin/proc_stat.c @@ -52,6 +52,7 @@ struct cpu_chart { }; static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES; +static int keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES; static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) { char buf[50 + 1]; @@ -161,7 +162,7 @@ static int read_per_core_time_in_state_files(struct cpu_chart *all_cpu_charts, s // the whole period under schedutil governor? 
// freez(tsf->last_ticks); + // tsf->last_ticks = NULL; - // tsf->last_ticks_len = 0; + // tsf->last_ticks_len = 0; continue; } @@ -237,15 +238,249 @@ static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, s } } +struct cpuidle_state { + char *name; + + char *time_filename; + int time_fd; + + collected_number value; + + RRDDIM *rd; +}; + +struct per_core_cpuidle_chart { + RRDSET *st; + + RRDDIM *active_time_rd; + collected_number active_time; + collected_number last_active_time; + + struct cpuidle_state *cpuidle_state; + size_t cpuidle_state_len; + int rescan_cpu_states; +}; + +static void* wake_cpu_thread(void* core) { + pthread_t thread; + cpu_set_t cpu_set; + static size_t cpu_wakeups = 0; + static int errors = 0; + + CPU_ZERO(&cpu_set); + CPU_SET(*(int*)core, &cpu_set); + + thread = pthread_self(); + if(unlikely(pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpu_set))) { + if(unlikely(errors < 8)) { + error("Cannot set CPU affinity for core %d", *(int*)core); + errors++; + } + else if(unlikely(errors < 9)) { + error("CPU affinity error reporting is now disabled"); + errors++; + } + } + + // Make the CPU core do something to force it to update its idle counters + cpu_wakeups++; + + return 0; +} + +static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) { + static size_t cpuidle_charts_len = 0; + static procfile *ff = NULL; + struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address; + size_t cores_found = 0; + + if(unlikely(!ff)) { + ff = procfile_open(schedstat_filename, " \t:", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 1; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) return 1; + + size_t lines = procfile_lines(ff), l; + size_t words; + + for(l = 0; l < lines ;l++) { + char *row_key = procfile_lineword(ff, l, 0); + + // faster strncmp(row_key, "cpu", 3) == 0 + if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) { + words = procfile_linewords(ff, l); + if(unlikely(words < 10)) { + error("Cannot read /proc/schedstat cpu line. 
Expected 9 params, read %zu.", words); + return 1; + } + cores_found++; + + size_t core = str2ul(&row_key[3]); + if(unlikely(core >= cores_found)) { + error("Core %zu found but no more than %zu cores were expected.", core, cores_found); + return 1; + } + + if(unlikely(cpuidle_charts_len < cores_found)) { + cpuidle_charts = reallocz(cpuidle_charts, sizeof(struct per_core_cpuidle_chart) * cores_found); + *cpuidle_charts_address = cpuidle_charts; + memset(cpuidle_charts + cpuidle_charts_len, 0, sizeof(struct per_core_cpuidle_chart) * (cores_found - cpuidle_charts_len)); + cpuidle_charts_len = cores_found; + } + + cpuidle_charts[core].active_time = str2ull(procfile_lineword(ff, l, 7)) / 1000; + } + } + + *schedstat_cores_found = cores_found; + return 0; +} + +static int read_one_state(char *buf, const char *filename, int *fd) { + ssize_t ret = read(*fd, buf, 50); + + if(unlikely(ret <= 0)) { + // cannot read that file + error("Cannot read file '%s'", filename); + close(*fd); + *fd = -1; + return 0; + } + else { + // successful read + + // terminate the buffer + buf[ret - 1] = '\0'; + + if(unlikely(keep_cpuidle_fds_open != CONFIG_BOOLEAN_YES)) { + close(*fd); + *fd = -1; + } + else if(lseek(*fd, 0, SEEK_SET) == -1) { + error("Cannot seek in file '%s'", filename); + close(*fd); + *fd = -1; + } + } + + return 1; +} + +static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) { + char filename[FILENAME_MAX + 1]; + static char next_state_filename[FILENAME_MAX + 1]; + struct stat stbuf; + struct per_core_cpuidle_chart *cc = &cpuidle_charts[core]; + size_t state; + + if(unlikely(!cc->cpuidle_state_len || cc->rescan_cpu_states)) { + int state_file_found = 1; // check at least one state + + if(cc->cpuidle_state_len) { + for(state = 0; state < cc->cpuidle_state_len; state++) { + freez(cc->cpuidle_state[state].name); + + freez(cc->cpuidle_state[state].time_filename); + close(cc->cpuidle_state[state].time_fd); + cc->cpuidle_state[state].time_fd = -1; + } + + freez(cc->cpuidle_state); + cc->cpuidle_state = NULL; + cc->cpuidle_state_len = 0; + + cc->active_time_rd = NULL; + cc->st = NULL; + } + + while(likely(state_file_found)) { + snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len); + if (stat(filename, &stbuf) == 0) + cc->cpuidle_state_len++; + else + state_file_found = 0; + } + snprintfz(next_state_filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len); + + cc->cpuidle_state = callocz(cc->cpuidle_state_len, sizeof(struct cpuidle_state)); + memset(cc->cpuidle_state, 0, sizeof(struct cpuidle_state) * cc->cpuidle_state_len); + + for(state = 0; state < cc->cpuidle_state_len; state++) { + char name_buf[50 + 1]; + snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, state); + + int fd = open(filename, O_RDONLY, 0666); + if(unlikely(fd == -1)) { + error("Cannot open file '%s'", filename); + cc->rescan_cpu_states = 1; + return 1; + } + + ssize_t r = read(fd, name_buf, 50); + if(unlikely(r < 1)) { + error("Cannot read file '%s'", filename); + close(fd); + cc->rescan_cpu_states = 1; + return 1; + } + + name_buf[r - 1] = '\0'; // erase extra character + cc->cpuidle_state[state].name = strdupz(name_buf); + close(fd); + + snprintfz(filename, FILENAME_MAX, cpuidle_time_filename, core, state); + cc->cpuidle_state[state].time_filename = strdupz(filename); + cc->cpuidle_state[state].time_fd = -1; + } + + cc->rescan_cpu_states = 0; + } + + for(state = 0; state < 
cc->cpuidle_state_len; state++) { + + struct cpuidle_state *cs = &cc->cpuidle_state[state]; + + if(unlikely(cs->time_fd == -1)) { + cs->time_fd = open(cs->time_filename, O_RDONLY); + if (unlikely(cs->time_fd == -1)) { + error("Cannot open file '%s'", cs->time_filename); + cc->rescan_cpu_states = 1; + return 1; + } + } + + char time_buf[50 + 1]; + if(likely(read_one_state(time_buf, cs->time_filename, &cs->time_fd))) { + cs->value = str2ll(time_buf, NULL); + } + else { + cc->rescan_cpu_states = 1; + return 1; + } + } + + // check if the number of states was increased + if(unlikely(stat(next_state_filename, &stbuf) == 0)) { + cc->rescan_cpu_states = 1; + return 1; + } + + return 0; +} + int do_proc_stat(int update_every, usec_t dt) { (void)dt; static struct cpu_chart *all_cpu_charts = NULL; static size_t all_cpu_charts_size = 0; static procfile *ff = NULL; - static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1; + static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, + do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1; static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked; - static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, *time_in_state_filename = NULL; + static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, + *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL; static RRDVAR *cpus_var = NULL; static int accurate_freq_avail = 0, accurate_freq_is_used = 0; size_t cores_found = (size_t)processors; @@ -265,6 +500,7 @@ int do_proc_stat(int update_every, usec_t dt) { do_core_throttle_count = CONFIG_BOOLEAN_NO; do_package_throttle_count = CONFIG_BOOLEAN_NO; do_cpu_freq = CONFIG_BOOLEAN_NO; + do_cpuidle = CONFIG_BOOLEAN_NO; } else { // the system has a reasonable number of processors @@ -272,12 +508,23 @@ int do_proc_stat(int update_every, usec_t dt) { do_core_throttle_count = CONFIG_BOOLEAN_AUTO; do_package_throttle_count = CONFIG_BOOLEAN_NO; do_cpu_freq = CONFIG_BOOLEAN_YES; + do_cpuidle = CONFIG_BOOLEAN_YES; + } + if(unlikely(processors > 24)) { + // the system has too many processors + keep_cpuidle_fds_open = CONFIG_BOOLEAN_NO; + } + else { + // the system has a reasonable number of processors + keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES; } keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open); + keep_cpuidle_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep cpuidle files open", keep_cpuidle_fds_open); do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count); do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count); do_cpu_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu frequency", do_cpu_freq); + do_cpuidle = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu idle states", do_cpuidle); hash_intr = simple_hash("intr"); hash_ctxt = simple_hash("ctxt"); @@ -297,6 +544,22 @@ int do_proc_stat(int update_every, usec_t dt) { snprintfz(filename, FILENAME_MAX, 
"%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/stats/time_in_state"); time_in_state_filename = config_get("plugin:proc:/proc/stat", "time_in_state filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/schedstat"); + schedstat_filename = config_get("plugin:proc:/proc/stat", "schedstat filename to monitor", filename); + + if(do_cpuidle != CONFIG_BOOLEAN_NO) { + struct stat stbuf; + + if (stat(schedstat_filename, &stbuf)) + do_cpuidle = CONFIG_BOOLEAN_NO; + } + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name"); + cpuidle_name_filename = config_get("plugin:proc:/proc/stat", "cpuidle name filename to monitor", filename); + + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time"); + cpuidle_time_filename = config_get("plugin:proc:/proc/stat", "cpuidle time filename to monitor", filename); } if(unlikely(!ff)) { @@ -407,7 +670,7 @@ int do_proc_stat(int update_every, usec_t dt) { cpu_chart->files[CPU_FREQ_INDEX].fd = -1; do_cpu_freq = CONFIG_BOOLEAN_YES; } - + snprintfz(filename, FILENAME_MAX, time_in_state_filename, id); if (stat(filename, &stbuf) == 0) { @@ -702,7 +965,7 @@ int do_proc_stat(int update_every, usec_t dt) { , "MHz" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_STAT_NAME - , 5003 + , NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ , update_every , RRDSET_TYPE_LINE ); @@ -715,6 +978,80 @@ int do_proc_stat(int update_every, usec_t dt) { } } + // -------------------------------------------------------------------- + + static struct per_core_cpuidle_chart *cpuidle_charts = NULL; + size_t schedstat_cores_found = 0; + + if(likely(do_cpuidle != CONFIG_BOOLEAN_NO && !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) { + int cpu_states_updated = 0; + size_t core, state; + + + // proc.plugin runs on Linux systems only. Multi-platform compatibility is not needed here, + // so bare pthread functions are used to avoid unneeded overheads. 
+ for(core = 0; core < schedstat_cores_found; core++) { + if(unlikely(!(cpuidle_charts[core].active_time - cpuidle_charts[core].last_active_time))) { + pthread_t thread; + + if(unlikely(pthread_create(&thread, NULL, wake_cpu_thread, (void *)&core))) + error("Cannot create wake_cpu_thread"); + else if(unlikely(pthread_join(thread, NULL))) + error("Cannot join wake_cpu_thread"); + cpu_states_updated = 1; + } + } + + if(unlikely(!cpu_states_updated || !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) { + for(core = 0; core < schedstat_cores_found; core++) { + cpuidle_charts[core].last_active_time = cpuidle_charts[core].active_time; + + int r = read_cpuidle_states(cpuidle_name_filename, cpuidle_time_filename, cpuidle_charts, core); + if(likely(r != -1 && (do_cpuidle == CONFIG_BOOLEAN_YES || r > 0))) { + do_cpuidle = CONFIG_BOOLEAN_YES; + + char cpuidle_chart_id[RRD_ID_LENGTH_MAX + 1]; + snprintfz(cpuidle_chart_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle", core); + + if(unlikely(!cpuidle_charts[core].st)) { + cpuidle_charts[core].st = rrdset_create_localhost( + "cpu" + , cpuidle_chart_id + , NULL + , "cpuidle" + , "cpuidle.cpuidle" + , "C-state residency time" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_STAT_NAME + , NETDATA_CHART_PRIO_CPUIDLE + core + , update_every + , RRDSET_TYPE_STACKED + ); + + char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1]; + snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_active_time", core); + cpuidle_charts[core].active_time_rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id, "C0 (active)", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) { + snprintfz(cpuidle_dim_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle_state%zu_time", core, state); + cpuidle_charts[core].cpuidle_state[state].rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id, + cpuidle_charts[core].cpuidle_state[state].name, + 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL); + } + } + else + rrdset_next(cpuidle_charts[core].st); + + rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].active_time_rd, cpuidle_charts[core].active_time); + for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) { + rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].cpuidle_state[state].rd, cpuidle_charts[core].cpuidle_state[state].value); + } + rrdset_done(cpuidle_charts[core].st); + } + } + } + } + if(cpus_var) rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found); diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c index f7c93c20a..a9712b242 100644 --- a/collectors/proc.plugin/proc_vmstat.c +++ b/collectors/proc.plugin/proc_vmstat.c @@ -105,7 +105,7 @@ int do_proc_vmstat(int update_every, usec_t dt) { , "swap" , NULL , "Swap I/O" - , "kilobytes/s" + , "KiB/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_VMSTAT_NAME , NETDATA_CHART_PRIO_SYSTEM_SWAPIO @@ -137,7 +137,7 @@ int do_proc_vmstat(int update_every, usec_t dt) { , "disk" , NULL , "Memory Paged from/to disk" - , "kilobytes/s" + , "KiB/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_VMSTAT_NAME , NETDATA_CHART_PRIO_SYSTEM_PGPGIO @@ -169,7 +169,7 @@ int do_proc_vmstat(int update_every, usec_t dt) { , "system" , NULL , "Memory Page Faults" - , "page faults/s" + , "faults/s" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_VMSTAT_NAME , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c new file 
mode 100644 index 000000000..09cdc7c0d --- /dev/null +++ b/collectors/proc.plugin/sys_class_power_supply.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "plugin_proc.h" + +#define PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME "/sys/class/power_supply" + +const char *ps_property_names[] = { "charge", "energy", "voltage"}; +const char *ps_property_titles[] = {"Battery charge", "Battery energy", "Power supply voltage"}; +const char *ps_property_units[] = { "Ah", "Wh", "V"}; + +const char *ps_property_dim_names[] = {"empty_design", "empty", "now", "full", "full_design", + "empty_design", "empty", "now", "full", "full_design", + "min_design", "min", "now", "max", "max_design"}; + +struct ps_property_dim { + char *name; + char *filename; + int fd; + + RRDDIM *rd; + unsigned long long value; + + struct ps_property_dim *next; +}; + +struct ps_property { + char *name; + char *title; + char *units; + + RRDSET *st; + + struct ps_property_dim *property_dim_root; + + struct ps_property *next; +}; + +struct capacity { + char *filename; + int fd; + + RRDSET *st; + RRDDIM *rd; + unsigned long long value; +}; + +struct power_supply { + char *name; + uint32_t hash; + int found; + + struct capacity *capacity; + + struct ps_property *property_root; + + struct power_supply *next; +}; + +static struct power_supply *power_supply_root = NULL; +static int files_num = 0; + +void power_supply_free(struct power_supply *ps) { + if(likely(ps)) { + + // free capacity structure + if(likely(ps->capacity)) { + if(likely(ps->capacity->st)) rrdset_is_obsolete(ps->capacity->st); + freez(ps->capacity->filename); + if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd); + files_num--; + freez(ps->capacity); + } + freez(ps->name); + + struct ps_property *pr = ps->property_root; + while(likely(pr)) { + + // free dimensions + struct ps_property_dim *pd = pr->property_dim_root; + while(likely(pd)) { + freez(pd->name); + freez(pd->filename); + if(likely(pd->fd != -1)) close(pd->fd); + files_num--; + struct ps_property_dim *d = pd; + pd = pd->next; + freez(d); + } + + // free properties + if(likely(pr->st)) rrdset_is_obsolete(pr->st); + freez(pr->name); + freez(pr->title); + freez(pr->units); + struct ps_property *p = pr; + pr = pr->next; + freez(p); + } + + // remove power supply from linked list + if(likely(ps == power_supply_root)) { + power_supply_root = ps->next; + } + else { + struct power_supply *last; + for(last = power_supply_root; last && last->next != ps; last = last->next); + if(likely(last)) last->next = ps->next; + } + + freez(ps); + } +} + +int do_sys_class_power_supply(int update_every, usec_t dt) { + (void)dt; + static int do_capacity = -1, do_property[3] = {-1}; + static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1; + static char *dirname = NULL; + + if(unlikely(do_capacity == -1)) { + do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES); + do_property[0] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery charge", CONFIG_BOOLEAN_NO); + do_property[1] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery energy", CONFIG_BOOLEAN_NO); + do_property[2] = config_get_boolean("plugin:proc:/sys/class/power_supply", "power supply voltage", CONFIG_BOOLEAN_NO); + + keep_fds_open_config = config_get_boolean_ondemand("plugin:proc:/sys/class/power_supply", "keep files open", CONFIG_BOOLEAN_AUTO); + + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", 
netdata_configured_host_prefix, "/sys/class/power_supply"); + dirname = config_get("plugin:proc:/sys/class/power_supply", "directory to monitor", filename); + } + + DIR *dir = opendir(dirname); + if(unlikely(!dir)) { + error("Cannot read directory '%s'", dirname); + return 1; + } + + struct dirent *de = NULL; + while(likely(de = readdir(dir))) { + if(likely(de->d_type == DT_DIR + && ( + (de->d_name[0] == '.' && de->d_name[1] == '\0') + || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0') + ))) + continue; + + if(likely(de->d_type == DT_LNK || de->d_type == DT_DIR)) { + uint32_t hash = simple_hash(de->d_name); + + struct power_supply *ps; + for(ps = power_supply_root; ps; ps = ps->next) { + if(unlikely(ps->hash == hash && !strcmp(ps->name, de->d_name))) { + ps->found = 1; + break; + } + } + + // allocate memory for power supply and initialize it + if(unlikely(!ps)) { + ps = callocz(sizeof(struct power_supply), 1); + ps->name = strdupz(de->d_name); + ps->hash = simple_hash(de->d_name); + ps->found = 1; + ps->next = power_supply_root; + power_supply_root = ps; + + struct stat stbuf; + if(likely(do_capacity != CONFIG_BOOLEAN_NO)) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/%s/%s", dirname, de->d_name, "capacity"); + if (stat(filename, &stbuf) == 0) { + ps->capacity = callocz(sizeof(struct capacity), 1); + ps->capacity->filename = strdupz(filename); + ps->capacity->fd = -1; + files_num++; + } + } + + // allocate memory and initialize structures for every property and file found + size_t pr_idx, pd_idx; + size_t prev_idx = 3; // there is no property with this index + + for(pr_idx = 0; pr_idx < 3; pr_idx++) { + if(unlikely(do_property[pr_idx] != CONFIG_BOOLEAN_NO)) { + struct ps_property *pr = NULL; + + for(pd_idx = pr_idx * 5; pd_idx < pr_idx * 5 + 5; pd_idx++) { + + // check if file exists + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/%s/%s_%s", dirname, de->d_name, + ps_property_names[pr_idx], ps_property_dim_names[pd_idx]); + if (stat(filename, &stbuf) == 0) { + + // add chart + if(unlikely(prev_idx != pr_idx)) { + pr = callocz(sizeof(struct ps_property), 1); + pr->name = strdupz(ps_property_names[pr_idx]); + pr->title = strdupz(ps_property_titles[pr_idx]); + pr->units = strdupz(ps_property_units[pr_idx]); + prev_idx = pr_idx; + pr->next = ps->property_root; + ps->property_root = pr; + } + + // add dimension + struct ps_property_dim *pd; + pd= callocz(sizeof(struct ps_property_dim), 1); + pd->name = strdupz(ps_property_dim_names[pd_idx]); + pd->filename = strdupz(filename); + pd->fd = -1; + files_num++; + pd->next = pr->property_dim_root; + pr->property_dim_root = pd; + } + } + } + } + } + + // read capacity file + if(likely(ps->capacity)) { + char buffer[30 + 1]; + + if(unlikely(ps->capacity->fd == -1)) { + ps->capacity->fd = open(ps->capacity->filename, O_RDONLY, 0666); + if(unlikely(ps->capacity->fd == -1)) { + error("Cannot open file '%s'", ps->capacity->filename); + power_supply_free(ps); + } + } + + ssize_t r = read(ps->capacity->fd, buffer, 30); + if(unlikely(r < 1)) { + error("Cannot read file '%s'", ps->capacity->filename); + power_supply_free(ps); + } + else { + buffer[r] = '\0'; + ps->capacity->value = str2ull(buffer); + } + + if(unlikely(!keep_fds_open)) { + close(ps->capacity->fd); + ps->capacity->fd = -1; + } + else if(unlikely(lseek(ps->capacity->fd, 0, SEEK_SET) == -1)) { + error("Cannot seek in file '%s'", ps->capacity->filename); + close(ps->capacity->fd); + ps->capacity->fd = -1; + } + } + + 
// read property files + int read_error = 0; + struct ps_property *pr; + for(pr = ps->property_root; pr && !read_error; pr = pr->next) { + struct ps_property_dim *pd; + for(pd = pr->property_dim_root; pd; pd = pd->next) { + char buffer[30 + 1]; + + if(unlikely(pd->fd == -1)) { + pd->fd = open(pd->filename, O_RDONLY, 0666); + if(unlikely(pd->fd == -1)) { + error("Cannot open file '%s'", pd->filename); + read_error = 1; + power_supply_free(ps); + break; + } + } + + ssize_t r = read(pd->fd, buffer, 30); + if(unlikely(r < 1)) { + error("Cannot read file '%s'", pd->filename); + read_error = 1; + power_supply_free(ps); + break; + } + buffer[r] = '\0'; + pd->value = str2ull(buffer); + + if(unlikely(!keep_fds_open)) { + close(pd->fd); + pd->fd = -1; + } + else if(unlikely(lseek(pd->fd, 0, SEEK_SET) == -1)) { + error("Cannot seek in file '%s'", pd->filename); + close(pd->fd); + pd->fd = -1; + } + } + } + } + } + + closedir(dir); + + keep_fds_open = keep_fds_open_config; + if(likely(keep_fds_open_config == CONFIG_BOOLEAN_AUTO)) { + if(unlikely(files_num > 32)) + keep_fds_open = CONFIG_BOOLEAN_NO; + else + keep_fds_open = CONFIG_BOOLEAN_YES; + } + + // -------------------------------------------------------------------- + + struct power_supply *ps = power_supply_root; + while(unlikely(ps)) { + if(unlikely(!ps->found)) { + struct power_supply *f = ps; + ps = ps->next; + power_supply_free(f); + continue; + } + + if(likely(ps->capacity)) { + if(unlikely(!ps->capacity->st)) { + ps->capacity->st = rrdset_create_localhost( + "powersupply_capacity" + , ps->name + , NULL + , ps->name + , "powersupply.capacity" + , "Battery capacity" + , "percentage" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME + , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY + , update_every + , RRDSET_TYPE_LINE + ); + } + else + rrdset_next(ps->capacity->st); + + if(unlikely(!ps->capacity->rd)) ps->capacity->rd = rrddim_add(ps->capacity->st, "capacity", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + rrddim_set_by_pointer(ps->capacity->st, ps->capacity->rd, ps->capacity->value); + + rrdset_done(ps->capacity->st); + } + + struct ps_property *pr; + for(pr = ps->property_root; pr; pr = pr->next) { + if(unlikely(!pr->st)) { + char id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1]; + snprintfz(id, RRD_ID_LENGTH_MAX, "powersupply_%s", pr->name); + snprintfz(context, RRD_ID_LENGTH_MAX, "powersupply.%s", pr->name); + + pr->st = rrdset_create_localhost( + id + , ps->name + , NULL + , ps->name + , context + , pr->title + , pr->units + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME + , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY + , update_every + , RRDSET_TYPE_LINE + ); + } + else + rrdset_next(pr->st); + + struct ps_property_dim *pd; + for(pd = pr->property_dim_root; pd; pd = pd->next) { + if(unlikely(!pd->rd)) pd->rd = rrddim_add(pr->st, pd->name, NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE); + rrddim_set_by_pointer(pr->st, pd->rd, pd->value); + } + + rrdset_done(pr->st); + } + + ps->found = 0; + ps = ps->next; + } + + return 0; +} diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c index ed980cea5..5aab24c1a 100644 --- a/collectors/proc.plugin/sys_fs_btrfs.c +++ b/collectors/proc.plugin/sys_fs_btrfs.c @@ -558,7 +558,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , node->label , "btrfs.disk" , title - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_BTRFS_NAME , NETDATA_CHART_PRIO_BTRFS_DISK @@ -614,7 +614,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , node->label 
, "btrfs.data" , title - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_BTRFS_NAME , NETDATA_CHART_PRIO_BTRFS_DATA @@ -655,7 +655,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , node->label , "btrfs.metadata" , title - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_BTRFS_NAME , NETDATA_CHART_PRIO_BTRFS_METADATA @@ -698,7 +698,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , node->label , "btrfs.system" , title - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_BTRFS_NAME , NETDATA_CHART_PRIO_BTRFS_SYSTEM diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c index 0f5c79c49..0b64987c9 100644 --- a/collectors/proc.plugin/sys_kernel_mm_ksm.c +++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c @@ -105,7 +105,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { , "ksm" , NULL , "Kernel Same Page Merging" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_KSM_NAME , NETDATA_CHART_PRIO_MEM_KSM @@ -145,7 +145,7 @@ int do_sys_kernel_mm_ksm(int update_every, usec_t dt) { , "ksm" , NULL , "Kernel Same Page Merging Savings" - , "MB" + , "MiB" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_KSM_NAME , NETDATA_CHART_PRIO_MEM_KSM_SAVINGS diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c index 1aaceb908..330bcf18b 100644 --- a/collectors/proc.plugin/zfs_common.c +++ b/collectors/proc.plugin/zfs_common.c @@ -4,7 +4,13 @@ struct arcstats arcstats = { 0 }; -void generate_charts_arcstats(const char *plugin, const char *module, int update_every) { +void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every) { + static int do_arc_size = -1, do_l2_size = -1, do_reads = -1, do_l2bytes = -1, do_ahits = -1, do_dhits = -1, \ + do_phits = -1, do_mhits = -1, do_l2hits = -1, do_list_hits = -1; + + if(unlikely(do_arc_size == -1)) + do_arc_size = do_l2_size = do_reads = do_l2bytes = do_ahits = do_dhits = do_phits = do_mhits \ + = do_l2hits = do_list_hits = show_zero_charts; // ARC reads unsigned long long aread = arcstats.hits + arcstats.misses; @@ -31,7 +37,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(do_arc_size == CONFIG_BOOLEAN_YES || arcstats.size || arcstats.c || arcstats.c_min || arcstats.c_max) { + do_arc_size = CONFIG_BOOLEAN_YES; + static RRDSET *st_arc_size = NULL; static RRDDIM *rd_arc_size = NULL; static RRDDIM *rd_arc_target_size = NULL; @@ -46,7 +54,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update , ZFS_FAMILY_SIZE , NULL , "ZFS ARC Size" - , "MB" + , "MiB" , plugin , module , NETDATA_CHART_PRIO_ZFS_ARC_SIZE @@ -71,7 +79,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - if(likely(arcstats.l2exist)) { + if(likely(arcstats.l2exist) && (do_l2_size == CONFIG_BOOLEAN_YES || arcstats.l2_size || arcstats.l2_asize)) { + do_l2_size = CONFIG_BOOLEAN_YES; + static RRDSET *st_l2_size = NULL; static RRDDIM *rd_l2_size = NULL; static RRDDIM *rd_l2_asize = NULL; @@ -84,7 +94,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update , ZFS_FAMILY_SIZE , NULL , "ZFS L2 ARC Size" - , "MB" + , "MiB" , plugin , module , NETDATA_CHART_PRIO_ZFS_L2_SIZE @@ -105,7 +115,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // 
-------------------------------------------------------------------- - { + if(likely(do_reads == CONFIG_BOOLEAN_YES || aread || dread || pread || mread || l2read)) { + do_reads = CONFIG_BOOLEAN_YES; + static RRDSET *st_reads = NULL; static RRDDIM *rd_aread = NULL; static RRDDIM *rd_dread = NULL; @@ -153,7 +165,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - if(likely(arcstats.l2exist)) { + if(likely(arcstats.l2exist && (do_l2bytes == CONFIG_BOOLEAN_YES || arcstats.l2_read_bytes || arcstats.l2_write_bytes))) { + do_l2bytes = CONFIG_BOOLEAN_YES; + static RRDSET *st_l2bytes = NULL; static RRDDIM *rd_l2_read_bytes = NULL; static RRDDIM *rd_l2_write_bytes = NULL; @@ -166,7 +180,7 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update , ZFS_FAMILY_ACCESSES , NULL , "ZFS ARC L2 Read/Write Rate" - , "kilobytes/s" + , "KiB/s" , plugin , module , NETDATA_CHART_PRIO_ZFS_IO @@ -187,7 +201,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(likely(do_ahits == CONFIG_BOOLEAN_YES || arcstats.hits || arcstats.misses)) { + do_ahits = CONFIG_BOOLEAN_YES; + static RRDSET *st_ahits = NULL; static RRDDIM *rd_ahits = NULL; static RRDDIM *rd_amisses = NULL; @@ -221,7 +237,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(likely(do_dhits == CONFIG_BOOLEAN_YES || dhit || dmiss)) { + do_dhits = CONFIG_BOOLEAN_YES; + static RRDSET *st_dhits = NULL; static RRDDIM *rd_dhits = NULL; static RRDDIM *rd_dmisses = NULL; @@ -255,7 +273,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(likely(do_phits == CONFIG_BOOLEAN_YES || phit || pmiss)) { + do_phits = CONFIG_BOOLEAN_YES; + static RRDSET *st_phits = NULL; static RRDDIM *rd_phits = NULL; static RRDDIM *rd_pmisses = NULL; @@ -289,7 +309,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(likely(do_mhits == CONFIG_BOOLEAN_YES || mhit || mmiss)) { + do_mhits = CONFIG_BOOLEAN_YES; + static RRDSET *st_mhits = NULL; static RRDDIM *rd_mhits = NULL; static RRDDIM *rd_mmisses = NULL; @@ -323,7 +345,9 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - if(likely(arcstats.l2exist)) { + if(likely(arcstats.l2exist && (do_l2hits == CONFIG_BOOLEAN_YES || l2hit || l2miss))) { + do_l2hits = CONFIG_BOOLEAN_YES; + static RRDSET *st_l2hits = NULL; static RRDDIM *rd_l2hits = NULL; static RRDDIM *rd_l2misses = NULL; @@ -357,7 +381,12 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update // -------------------------------------------------------------------- - { + if(likely(do_list_hits == CONFIG_BOOLEAN_YES || arcstats.mfu_hits \ + || arcstats.mru_hits \ + || arcstats.mfu_ghost_hits \ + || arcstats.mru_ghost_hits)) { + do_list_hits = CONFIG_BOOLEAN_YES; + static RRDSET *st_list_hits = NULL; static RRDDIM *rd_mfu = NULL; static RRDDIM *rd_mru = NULL; @@ -396,7 +425,14 @@ void generate_charts_arcstats(const char *plugin, const char *module, int update } } -void 
generate_charts_arc_summary(const char *plugin, const char *module, int update_every) { +void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every) { + static int do_arc_size_breakdown = -1, do_memory = -1, do_important_ops = -1, do_actual_hits = -1, \ + do_demand_data_hits = -1, do_prefetch_data_hits = -1, do_hash_elements = -1, do_hash_chains = -1; + + if(unlikely(do_arc_size_breakdown == -1)) + do_arc_size_breakdown = do_memory = do_important_ops = do_actual_hits = do_demand_data_hits \ + = do_prefetch_data_hits = do_hash_elements = do_hash_chains = show_zero_charts; + unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses; unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits; unsigned long long real_misses = arc_accesses_total - real_hits; @@ -418,7 +454,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_arc_size_breakdown == CONFIG_BOOLEAN_YES || mru_size || mfu_size)) { + do_arc_size_breakdown = CONFIG_BOOLEAN_YES; + static RRDSET *st_arc_size_breakdown = NULL; static RRDDIM *rd_most_recent = NULL; static RRDDIM *rd_most_frequent = NULL; @@ -452,7 +490,11 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_memory == CONFIG_BOOLEAN_YES || arcstats.memory_direct_count \ + || arcstats.memory_throttle_count \ + || arcstats.memory_indirect_count)) { + do_memory = CONFIG_BOOLEAN_YES; + static RRDSET *st_memory = NULL; #ifndef __FreeBSD__ static RRDDIM *rd_direct = NULL; @@ -501,7 +543,12 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_important_ops == CONFIG_BOOLEAN_YES || arcstats.deleted \ + || arcstats.evict_skip \ + || arcstats.mutex_miss \ + || arcstats.hash_collisions)) { + do_important_ops = CONFIG_BOOLEAN_YES; + static RRDSET *st_important_ops = NULL; static RRDDIM *rd_deleted = NULL; static RRDDIM *rd_mutex_misses = NULL; @@ -541,7 +588,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_actual_hits == CONFIG_BOOLEAN_YES || real_hits || real_misses)) { + do_actual_hits = CONFIG_BOOLEAN_YES; + static RRDSET *st_actual_hits = NULL; static RRDDIM *rd_actual_hits = NULL; static RRDDIM *rd_actual_misses = NULL; @@ -575,7 +624,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_demand_data_hits == CONFIG_BOOLEAN_YES || arcstats.demand_data_hits || arcstats.demand_data_misses)) { + do_demand_data_hits = CONFIG_BOOLEAN_YES; + static RRDSET *st_demand_data_hits = NULL; static RRDDIM *rd_demand_data_hits = NULL; static RRDDIM *rd_demand_data_misses = NULL; @@ -609,7 +660,10 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_prefetch_data_hits == CONFIG_BOOLEAN_YES || arcstats.prefetch_data_hits \ + || arcstats.prefetch_data_misses)) { + do_prefetch_data_hits = CONFIG_BOOLEAN_YES; + static RRDSET *st_prefetch_data_hits = NULL; static RRDDIM *rd_prefetch_data_hits = NULL; 
static RRDDIM *rd_prefetch_data_misses = NULL; @@ -643,7 +697,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_hash_elements == CONFIG_BOOLEAN_YES || arcstats.hash_elements || arcstats.hash_elements_max)) { + do_hash_elements = CONFIG_BOOLEAN_YES; + static RRDSET *st_hash_elements = NULL; static RRDDIM *rd_hash_elements_current = NULL; static RRDDIM *rd_hash_elements_max = NULL; @@ -677,7 +733,9 @@ void generate_charts_arc_summary(const char *plugin, const char *module, int upd // -------------------------------------------------------------------- - { + if(likely(do_hash_chains == CONFIG_BOOLEAN_YES || arcstats.hash_chains || arcstats.hash_chain_max)) { + do_hash_chains = CONFIG_BOOLEAN_YES; + static RRDSET *st_hash_chains = NULL; static RRDDIM *rd_hash_chains_current = NULL; static RRDDIM *rd_hash_chains_max = NULL; diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h index fab54f59a..148f9e474 100644 --- a/collectors/proc.plugin/zfs_common.h +++ b/collectors/proc.plugin/zfs_common.h @@ -109,7 +109,7 @@ struct arcstats { int l2exist; }; -void generate_charts_arcstats(const char *plugin, const char *module, int update_every); -void generate_charts_arc_summary(const char *plugin, const char *module, int update_every); +void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every); +void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every); #endif //NETDATA_ZFS_COMMON_H diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am index 984050c42..3599d9c9f 100644 --- a/collectors/python.d.plugin/Makefile.am +++ b/collectors/python.d.plugin/Makefile.am @@ -29,12 +29,11 @@ dist_python_DATA = \ userpythonconfigdir=$(configdir)/python.d dist_userpythonconfig_DATA = \ - $(top_srcdir)/installer/.keep \ + .keep \ $(NULL) pythonconfigdir=$(libconfigdir)/python.d dist_pythonconfig_DATA = \ - $(top_srcdir)/installer/.keep \ $(NULL) include adaptec_raid/Makefile.inc diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in deleted file mode 100644 index 495606896..000000000 --- a/collectors/python.d.plugin/Makefile.in +++ /dev/null @@ -1,2025 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc \ - $(srcdir)/adaptec_raid/Makefile.inc \ - $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \ - $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \ - $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \ - $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \ - $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \ - $(srcdir)/dns_query_time/Makefile.inc \ - $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \ - $(srcdir)/elasticsearch/Makefile.inc \ - $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \ - $(srcdir)/fail2ban/Makefile.inc \ - $(srcdir)/freeradius/Makefile.inc \ - $(srcdir)/go_expvar/Makefile.inc \ - $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \ - $(srcdir)/httpcheck/Makefile.inc \ - $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \ - $(srcdir)/isc_dhcpd/Makefile.inc \ - $(srcdir)/linux_power_supply/Makefile.inc \ - $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \ - $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \ - $(srcdir)/memcached/Makefile.inc \ - $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \ - $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \ - $(srcdir)/nginx_plus/Makefile.inc \ - $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc \ - $(srcdir)/ntpd/Makefile.inc \ - $(srcdir)/ovpn_status_log/Makefile.inc \ - $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc \ - $(srcdir)/portcheck/Makefile.inc \ - $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \ - $(srcdir)/powerdns/Makefile.inc \ - $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \ - $(srcdir)/rabbitmq/Makefile.inc 
$(srcdir)/redis/Makefile.inc \ - $(srcdir)/rethinkdbs/Makefile.inc \ - $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \ - $(srcdir)/sensors/Makefile.inc \ - $(srcdir)/smartd_log/Makefile.inc \ - $(srcdir)/spigotmc/Makefile.inc \ - $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \ - $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc \ - $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc \ - $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc \ - $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc \ - $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS) \ - $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \ - $(dist_python_urllib3_DATA) \ - $(dist_python_urllib3_backports_DATA) \ - $(dist_python_urllib3_contrib_DATA) \ - $(dist_python_urllib3_packages_DATA) \ - $(dist_python_urllib3_securetransport_DATA) \ - $(dist_python_urllib3_ssl_match_hostname_DATA) \ - $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \ - $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \ - $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \ - $(dist_userpythonconfig_DATA) -subdir = collectors/python.d.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \ - "$(DESTDIR)$(basesdir)" \ - "$(DESTDIR)$(bases_framework_servicesdir)" \ - "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \ - "$(DESTDIR)$(python_urllib3dir)" \ - "$(DESTDIR)$(python_urllib3_backportsdir)" \ - "$(DESTDIR)$(python_urllib3_contribdir)" \ - "$(DESTDIR)$(python_urllib3_packagesdir)" \ - "$(DESTDIR)$(python_urllib3_securetransportdir)" \ - "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \ - "$(DESTDIR)$(python_urllib3_utildir)" \ - "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \ - "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \ - "$(DESTDIR)$(third_partydir)" \ - "$(DESTDIR)$(userpythonconfigdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \ - $(dist_python_urllib3_DATA) \ - $(dist_python_urllib3_backports_DATA) \ - $(dist_python_urllib3_contrib_DATA) \ - $(dist_python_urllib3_packages_DATA) \ - $(dist_python_urllib3_securetransport_DATA) \ - $(dist_python_urllib3_ssl_match_hostname_DATA) \ - $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \ - $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \ - $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \ - $(dist_userpythonconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ 
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - python.d.plugin \ - $(NULL) - -SUFFIXES = .in -dist_libconfig_DATA = \ - python.d.conf \ - $(NULL) - -dist_plugins_SCRIPTS = \ - python.d.plugin \ - $(NULL) - - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not 
install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the 
distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution - -# do not install these files, but include them in the distribution -dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \ - adaptec_raid/README.md adaptec_raid/Makefile.inc \ - apache/README.md apache/Makefile.inc beanstalk/README.md \ - beanstalk/Makefile.inc bind_rndc/README.md \ - bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \ - ceph/README.md ceph/Makefile.inc chrony/README.md \ - chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \ - cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \ - cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \ - dns_query_time/README.md dns_query_time/Makefile.inc \ - dockerd/README.md dockerd/Makefile.inc dovecot/README.md \ - dovecot/Makefile.inc elasticsearch/README.md \ - elasticsearch/Makefile.inc example/README.md \ - example/Makefile.inc exim/README.md exim/Makefile.inc \ - fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \ - freeradius/Makefile.inc go_expvar/README.md \ - go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \ - hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \ - httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \ - ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \ - isc_dhcpd/Makefile.inc linux_power_supply/README.md \ - linux_power_supply/Makefile.inc litespeed/README.md \ - litespeed/Makefile.inc logind/README.md logind/Makefile.inc \ - mdstat/README.md mdstat/Makefile.inc megacli/README.md \ - megacli/Makefile.inc memcached/README.md \ - memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \ - monit/README.md monit/Makefile.inc mysql/README.md \ - mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \ - nginx_plus/README.md nginx_plus/Makefile.inc \ - nvidia_smi/README.md nvidia_smi/Makefile.inc nsd/README.md \ - nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \ - ovpn_status_log/README.md ovpn_status_log/Makefile.inc \ - openldap/README.md openldap/Makefile.inc phpfpm/README.md \ - phpfpm/Makefile.inc portcheck/README.md portcheck/Makefile.inc \ - postfix/README.md postfix/Makefile.inc postgres/README.md \ - postgres/Makefile.inc powerdns/README.md powerdns/Makefile.inc \ - proxysql/README.md proxysql/Makefile.inc puppet/README.md \ - puppet/Makefile.inc rabbitmq/README.md rabbitmq/Makefile.inc \ - redis/README.md redis/Makefile.inc rethinkdbs/README.md \ - rethinkdbs/Makefile.inc retroshare/README.md \ - retroshare/Makefile.inc samba/README.md samba/Makefile.inc \ - sensors/README.md sensors/Makefile.inc smartd_log/README.md \ - smartd_log/Makefile.inc spigotmc/README.md \ - spigotmc/Makefile.inc springboot/README.md \ - springboot/Makefile.inc squid/README.md squid/Makefile.inc \ - tomcat/README.md tomcat/Makefile.inc tor/README.md \ - tor/Makefile.inc traefik/README.md traefik/Makefile.inc \ - unbound/README.md unbound/Makefile.inc uwsgi/README.md \ - uwsgi/Makefile.inc varnish/README.md varnish/Makefile.inc \ - w1sensor/README.md w1sensor/Makefile.inc web_log/README.md \ - web_log/Makefile.inc -dist_python_SCRIPTS = \ - 
$(NULL) - - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files - -# install these files -dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \ - apache/apache.chart.py beanstalk/beanstalk.chart.py \ - bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \ - ceph/ceph.chart.py chrony/chrony.chart.py \ - couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \ - cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \ - dns_query_time/dns_query_time.chart.py \ - dockerd/dockerd.chart.py dovecot/dovecot.chart.py \ - elasticsearch/elasticsearch.chart.py example/example.chart.py \ - exim/exim.chart.py fail2ban/fail2ban.chart.py \ - freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \ - haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \ - httpcheck/httpcheck.chart.py icecast/icecast.chart.py \ - ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \ - linux_power_supply/linux_power_supply.chart.py \ - litespeed/litespeed.chart.py logind/logind.chart.py \ - mdstat/mdstat.chart.py megacli/megacli.chart.py \ - memcached/memcached.chart.py mongodb/mongodb.chart.py \ - monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \ - nginx_plus/nginx_plus.chart.py nvidia_smi/nvidia_smi.chart.py \ - nsd/nsd.chart.py ntpd/ntpd.chart.py \ - ovpn_status_log/ovpn_status_log.chart.py \ - openldap/openldap.chart.py phpfpm/phpfpm.chart.py \ - portcheck/portcheck.chart.py postfix/postfix.chart.py \ - postgres/postgres.chart.py powerdns/powerdns.chart.py \ - proxysql/proxysql.chart.py puppet/puppet.chart.py \ - rabbitmq/rabbitmq.chart.py redis/redis.chart.py \ - rethinkdbs/rethinkdbs.chart.py retroshare/retroshare.chart.py \ - samba/samba.chart.py sensors/sensors.chart.py \ - smartd_log/smartd_log.chart.py spigotmc/spigotmc.chart.py \ - springboot/springboot.chart.py squid/squid.chart.py \ - tomcat/tomcat.chart.py tor/tor.chart.py \ - traefik/traefik.chart.py unbound/unbound.chart.py \ - uwsgi/uwsgi.chart.py varnish/varnish.chart.py \ - 
w1sensor/w1sensor.chart.py web_log/web_log.chart.py -userpythonconfigdir = $(configdir)/python.d -dist_userpythonconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - -pythonconfigdir = $(libconfigdir)/python.d -dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \ - adaptec_raid/adaptec_raid.conf apache/apache.conf \ - beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \ - boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \ - couchdb/couchdb.conf cpufreq/cpufreq.conf cpuidle/cpuidle.conf \ - dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \ - dockerd/dockerd.conf dovecot/dovecot.conf \ - elasticsearch/elasticsearch.conf example/example.conf \ - exim/exim.conf fail2ban/fail2ban.conf \ - freeradius/freeradius.conf go_expvar/go_expvar.conf \ - haproxy/haproxy.conf hddtemp/hddtemp.conf \ - httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \ - isc_dhcpd/isc_dhcpd.conf \ - linux_power_supply/linux_power_supply.conf \ - litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \ - megacli/megacli.conf memcached/memcached.conf \ - mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \ - nginx/nginx.conf nginx_plus/nginx_plus.conf \ - nvidia_smi/nvidia_smi.conf nsd/nsd.conf ntpd/ntpd.conf \ - ovpn_status_log/ovpn_status_log.conf openldap/openldap.conf \ - phpfpm/phpfpm.conf portcheck/portcheck.conf \ - postfix/postfix.conf postgres/postgres.conf \ - powerdns/powerdns.conf proxysql/proxysql.conf \ - puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \ - rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \ - samba/samba.conf sensors/sensors.conf \ - smartd_log/smartd_log.conf spigotmc/spigotmc.conf \ - springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \ - tor/tor.conf traefik/traefik.conf unbound/unbound.conf \ - uwsgi/uwsgi.conf varnish/varnish.conf w1sensor/w1sensor.conf \ - web_log/web_log.conf -pythonmodulesdir = $(pythondir)/python_modules -dist_pythonmodules_DATA = \ - python_modules/__init__.py \ - $(NULL) - -basesdir = $(pythonmodulesdir)/bases -dist_bases_DATA = \ - python_modules/bases/__init__.py \ - python_modules/bases/charts.py \ - python_modules/bases/collection.py \ - python_modules/bases/loaders.py \ - python_modules/bases/loggers.py \ - $(NULL) - -bases_framework_servicesdir = $(basesdir)/FrameworkServices -dist_bases_framework_services_DATA = \ - python_modules/bases/FrameworkServices/__init__.py \ - python_modules/bases/FrameworkServices/ExecutableService.py \ - python_modules/bases/FrameworkServices/LogService.py \ - python_modules/bases/FrameworkServices/MySQLService.py \ - python_modules/bases/FrameworkServices/SimpleService.py \ - python_modules/bases/FrameworkServices/SocketService.py \ - python_modules/bases/FrameworkServices/UrlService.py \ - $(NULL) - -third_partydir = $(pythonmodulesdir)/third_party -dist_third_party_DATA = \ - python_modules/third_party/__init__.py \ - python_modules/third_party/ordereddict.py \ - python_modules/third_party/lm_sensors.py \ - python_modules/third_party/mcrcon.py \ - python_modules/third_party/boinc_client.py \ - python_modules/third_party/monotonic.py \ - $(NULL) - -pythonyaml2dir = $(pythonmodulesdir)/pyyaml2 -dist_pythonyaml2_DATA = \ - python_modules/pyyaml2/__init__.py \ - python_modules/pyyaml2/composer.py \ - python_modules/pyyaml2/constructor.py \ - python_modules/pyyaml2/cyaml.py \ - python_modules/pyyaml2/dumper.py \ - python_modules/pyyaml2/emitter.py \ - python_modules/pyyaml2/error.py \ - python_modules/pyyaml2/events.py \ - python_modules/pyyaml2/loader.py 
\ - python_modules/pyyaml2/nodes.py \ - python_modules/pyyaml2/parser.py \ - python_modules/pyyaml2/reader.py \ - python_modules/pyyaml2/representer.py \ - python_modules/pyyaml2/resolver.py \ - python_modules/pyyaml2/scanner.py \ - python_modules/pyyaml2/serializer.py \ - python_modules/pyyaml2/tokens.py \ - $(NULL) - -pythonyaml3dir = $(pythonmodulesdir)/pyyaml3 -dist_pythonyaml3_DATA = \ - python_modules/pyyaml3/__init__.py \ - python_modules/pyyaml3/composer.py \ - python_modules/pyyaml3/constructor.py \ - python_modules/pyyaml3/cyaml.py \ - python_modules/pyyaml3/dumper.py \ - python_modules/pyyaml3/emitter.py \ - python_modules/pyyaml3/error.py \ - python_modules/pyyaml3/events.py \ - python_modules/pyyaml3/loader.py \ - python_modules/pyyaml3/nodes.py \ - python_modules/pyyaml3/parser.py \ - python_modules/pyyaml3/reader.py \ - python_modules/pyyaml3/representer.py \ - python_modules/pyyaml3/resolver.py \ - python_modules/pyyaml3/scanner.py \ - python_modules/pyyaml3/serializer.py \ - python_modules/pyyaml3/tokens.py \ - $(NULL) - -python_urllib3dir = $(pythonmodulesdir)/urllib3 -dist_python_urllib3_DATA = \ - python_modules/urllib3/__init__.py \ - python_modules/urllib3/_collections.py \ - python_modules/urllib3/connection.py \ - python_modules/urllib3/connectionpool.py \ - python_modules/urllib3/exceptions.py \ - python_modules/urllib3/fields.py \ - python_modules/urllib3/filepost.py \ - python_modules/urllib3/response.py \ - python_modules/urllib3/poolmanager.py \ - python_modules/urllib3/request.py \ - $(NULL) - -python_urllib3_utildir = $(python_urllib3dir)/util -dist_python_urllib3_util_DATA = \ - python_modules/urllib3/util/__init__.py \ - python_modules/urllib3/util/connection.py \ - python_modules/urllib3/util/request.py \ - python_modules/urllib3/util/response.py \ - python_modules/urllib3/util/retry.py \ - python_modules/urllib3/util/selectors.py \ - python_modules/urllib3/util/ssl_.py \ - python_modules/urllib3/util/timeout.py \ - python_modules/urllib3/util/url.py \ - python_modules/urllib3/util/wait.py \ - $(NULL) - -python_urllib3_packagesdir = $(python_urllib3dir)/packages -dist_python_urllib3_packages_DATA = \ - python_modules/urllib3/packages/__init__.py \ - python_modules/urllib3/packages/ordered_dict.py \ - python_modules/urllib3/packages/six.py \ - $(NULL) - -python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports -dist_python_urllib3_backports_DATA = \ - python_modules/urllib3/packages/backports/__init__.py \ - python_modules/urllib3/packages/backports/makefile.py \ - $(NULL) - -python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname -dist_python_urllib3_ssl_match_hostname_DATA = \ - python_modules/urllib3/packages/ssl_match_hostname/__init__.py \ - python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \ - $(NULL) - -python_urllib3_contribdir = $(python_urllib3dir)/contrib -dist_python_urllib3_contrib_DATA = \ - python_modules/urllib3/contrib/__init__.py \ - python_modules/urllib3/contrib/appengine.py \ - python_modules/urllib3/contrib/ntlmpool.py \ - python_modules/urllib3/contrib/pyopenssl.py \ - python_modules/urllib3/contrib/securetransport.py \ - python_modules/urllib3/contrib/socks.py \ - $(NULL) - -python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport -dist_python_urllib3_securetransport_DATA = \ - python_modules/urllib3/contrib/_securetransport/__init__.py \ - python_modules/urllib3/contrib/_securetransport/bindings.py \ - 
python_modules/urllib3/contrib/_securetransport/low_level.py \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nvidia_smi/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/openldap/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/tor/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { 
files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pythonSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) -install-dist_basesDATA: $(dist_bases_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \ - done - -uninstall-dist_basesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir) -install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read 
files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \ - done - -uninstall-dist_bases_framework_servicesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir) -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonDATA: $(dist_python_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \ - done - -uninstall-dist_pythonDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - 
done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_backportsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_contribDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_packagesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_securetransportDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | 
sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_ssl_match_hostnameDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir) -install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \ - done - -uninstall-dist_python_urllib3_utilDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir) -install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \ - done - -uninstall-dist_pythonconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read 
files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \ - done - -uninstall-dist_pythonmodulesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir) -install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \ - done - -uninstall-dist_pythonyaml2DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir) -install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \ - done - -uninstall-dist_pythonyaml3DATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir) -install-dist_third_partyDATA: $(dist_third_party_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \ - done - -uninstall-dist_third_partyDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir) -install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " 
$(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \ - done - -uninstall-dist_userpythonconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_basesDATA \ - install-dist_bases_framework_servicesDATA \ - install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ - install-dist_pythonDATA install-dist_pythonSCRIPTS \ - install-dist_python_urllib3DATA \ - install-dist_python_urllib3_backportsDATA \ - install-dist_python_urllib3_contribDATA \ - install-dist_python_urllib3_packagesDATA \ - install-dist_python_urllib3_securetransportDATA \ - install-dist_python_urllib3_ssl_match_hostnameDATA \ - install-dist_python_urllib3_utilDATA \ - install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \ - install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \ - install-dist_third_partyDATA install-dist_userpythonconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_basesDATA \ - uninstall-dist_bases_framework_servicesDATA \ - uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \ - uninstall-dist_python_urllib3DATA \ - uninstall-dist_python_urllib3_backportsDATA \ - uninstall-dist_python_urllib3_contribDATA \ - uninstall-dist_python_urllib3_packagesDATA \ - uninstall-dist_python_urllib3_securetransportDATA \ - uninstall-dist_python_urllib3_ssl_match_hostnameDATA \ - uninstall-dist_python_urllib3_utilDATA \ - uninstall-dist_pythonconfigDATA \ - uninstall-dist_pythonmodulesDATA \ - uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \ - uninstall-dist_third_partyDATA \ - uninstall-dist_userpythonconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_basesDATA \ - install-dist_bases_framework_servicesDATA \ - install-dist_libconfigDATA install-dist_pluginsSCRIPTS \ - install-dist_pythonDATA install-dist_pythonSCRIPTS \ - install-dist_python_urllib3DATA \ - install-dist_python_urllib3_backportsDATA \ - install-dist_python_urllib3_contribDATA \ - install-dist_python_urllib3_packagesDATA \ - install-dist_python_urllib3_securetransportDATA \ - install-dist_python_urllib3_ssl_match_hostnameDATA \ - install-dist_python_urllib3_utilDATA \ - install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \ - install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \ - install-dist_third_partyDATA install-dist_userpythonconfigDATA \ - install-dvi install-dvi-am install-exec install-exec-am \ - install-html install-html-am install-info install-info-am \ - install-man install-pdf install-pdf-am install-ps \ - install-ps-am install-strip installcheck installcheck-am \ - installdirs maintainer-clean 
maintainer-clean-generic \ - mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \ - uninstall uninstall-am uninstall-dist_basesDATA \ - uninstall-dist_bases_framework_servicesDATA \ - uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \ - uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \ - uninstall-dist_python_urllib3DATA \ - uninstall-dist_python_urllib3_backportsDATA \ - uninstall-dist_python_urllib3_contribDATA \ - uninstall-dist_python_urllib3_packagesDATA \ - uninstall-dist_python_urllib3_securetransportDATA \ - uninstall-dist_python_urllib3_ssl_match_hostnameDATA \ - uninstall-dist_python_urllib3_utilDATA \ - uninstall-dist_pythonconfigDATA \ - uninstall-dist_pythonmodulesDATA \ - uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \ - uninstall-dist_third_partyDATA \ - uninstall-dist_userpythonconfigDATA - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md index 673fc2c99..8955197a7 100644 --- a/collectors/python.d.plugin/README.md +++ b/collectors/python.d.plugin/README.md @@ -9,21 +9,6 @@ 5. Allows each **module** to have one or more data collection **jobs** 6. Each **job** is collecting one or more metrics from a single data source -## Pull Request Checklist for Python Plugins - -This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive. - -At minimum, to be buildable and testable, the PR needs to include: - -* The module itself, following proper naming conventions: `python.d//.chart.py` -* A README.md file for the plugin under `python.d/`. -* The configuration file for the module: `conf.d/python.d/.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md -* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# : yes` if the module is to be enabled by default, or one that reads `: no` if it is to be disabled by default. -* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`. -* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA` -* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts. - - ## Disclaimer Every module should be compatible with python2 and python3. 
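Editor's note on the disclaimer above: the usual way modules satisfy this dual-version requirement is a try/except import shim; the couchdb module later in this patch uses the same pattern for its `Queue` import. A minimal standalone sketch (illustrative, not part of the patch):

```python
# Python 2/3 compatibility shim: prefer the Python 3 module name,
# fall back to the Python 2 one (couchdb.chart.py below does the same).
try:
    from queue import Queue   # Python 3
except ImportError:
    from Queue import Queue   # Python 2

q = Queue()
q.put('metric')
print(q.get())  # -> 'metric' on both interpreters
```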
@@ -36,7 +21,6 @@ Every configuration file must have one of two formats:
```yaml
update_every : 2 # update frequency
-retries : 1 # how many failures in update() is tolerated
priority : 20000 # where it is shown on dashboard
other_var1 : bla # variables passed to module
@@ -48,7 +32,6 @@ other_var2 : alb
```yaml
# module defaults:
update_every : 2
-retries : 1
priority : 20000
local: # job name
@@ -57,13 +40,25 @@ local: # job name
other_job:
priority : 5 # job position on dashboard
- retries : 20 # job retries
other_var2 : val # module specific variable
```
-`update_every`, `retries`, and `priority` are always optional.
+`update_every` and `priority` are always optional.
----
+## How to debug a python module
+
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
+
+```
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin [module] debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin [module] debug trace
+```
+Where `[module]` is the directory name under https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin

## How to write a new module

@@ -74,7 +69,9 @@ Writing new python module is simple. You just need to remember to include 5 majo
 - **_get_data** method
 - all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
-If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand, so that you have updated all the files you need to.
+
+For a quick start, you can look at the [example plugin](example/example.chart.py); a minimal skeleton is also sketched below, after the checklist.

### Global variables `ORDER` and `CHART`

@@ -210,3 +207,19 @@ Sockets are accessed in non-blocking mode with 15 second timeout.
After every execution of `_get_raw_data` the socket is closed; to prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
`_check_raw_data` should take raw data and return `True` if all data is received, otherwise it should return `False`. It should also do this in a fast and efficient way.
+
+## Pull Request Checklist for Python Plugins
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
+* A README.md file for the plugin under `python.d/<module_dir>`.
+* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions are also needed in the configuration section of the README.md.
+* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
+* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA` +* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md index 499dc9190..682280f2e 100644 --- a/collectors/python.d.plugin/adaptec_raid/README.md +++ b/collectors/python.d.plugin/adaptec_raid/README.md @@ -44,3 +44,5 @@ adaptec_raid: yes ![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png) --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf index 253cbf5a9..fa462ec83 100644 --- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf +++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf @@ -19,11 +19,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
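Here is the minimal module skeleton promised above, in the spirit of the bundled example plugin (the `random` chart, its dimension, and the values are illustrative, not taken from this patch):

```python
# python.d/<module_name>.chart.py - minimal skeleton (illustrative)
import random

from bases.FrameworkServices.SimpleService import SimpleService

ORDER = ['random']  # chart display order on the dashboard

CHARTS = {
    'random': {
        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
        'lines': [
            ['random1']  # dimension id
        ]
    }
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS

    def _get_data(self):
        # return a dict mapping dimension ids to values, or None on failure
        return {'random1': random.randint(0, 100)}
```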
@@ -50,6 +48,6 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # ---------------------------------------------------------------------- diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md index c6d1d126a..090feb070 100644 --- a/collectors/python.d.plugin/apache/README.md +++ b/collectors/python.d.plugin/apache/README.md @@ -46,14 +46,14 @@ priority : 90100 local: url : 'http://localhost/server-status?auto' - retries : 20 remote: url : 'http://www.apache.org/server-status?auto' update_every : 5 - retries : 4 ``` Without configuration, module attempts to connect to `http://localhost/server-status?auto` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py index d136274d0..655616d07 100644 --- a/collectors/python.d.plugin/apache/apache.chart.py +++ b/collectors/python.d.plugin/apache/apache.chart.py @@ -5,64 +5,60 @@ from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://www.apache.org/server-status?auto' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq'] + +ORDER = [ + 'requests', + 'connections', + 'conns_async', + 'net', + 'workers', + 'reqpersec', + 'bytespersec', + 'bytesperreq', +] CHARTS = { 'bytesperreq': { - 'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request', + 'options': [None, 'Lifetime Avg. Request Size', 'KiB', 'statistics', 'apache.bytesperreq', 'area'], 'lines': [ - ['size_req'] + ['size_req', 'size', 'absolute', 1, 1024 * 100000] ]}, 'workers': { - 'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'], + 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'], 'lines': [ ['idle'], ['busy'], ]}, 'reqpersec': { - 'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics', + 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics', 'apache.reqpersec', 'area'], 'lines': [ - ['requests_sec'] + ['requests_sec', 'requests', 'absolute', 1, 100000] ]}, 'bytespersec': { - 'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics', + 'options': [None, 'Lifetime Avg. 
Bandwidth/s', 'kilobits/s', 'statistics', 'apache.bytesperreq', 'area'], 'lines': [ - ['size_sec', None, 'absolute', 8, 1000] + ['size_sec', None, 'absolute', 8, 1000 * 100000] ]}, 'requests': { - 'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'], + 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'], 'lines': [ ['requests', None, 'incremental'] ]}, 'net': { - 'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'], + 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'], 'lines': [ ['sent', None, 'incremental', 8, 1] ]}, 'connections': { - 'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'], + 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'], 'lines': [ ['connections'] ]}, 'conns_async': { - 'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'], + 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'], 'lines': [ ['keepalive'], ['closing'], @@ -86,6 +82,14 @@ ASSIGNMENT = { 'ConnsAsyncWriting': 'writing' } +FLOAT_VALUES = [ + 'BytesPerReq', + 'ReqPerSec', + 'BytesPerSec', +] + +LIGHTTPD_MARKER = 'idle_servers' + class Service(UrlService): def __init__(self, configuration=None, name=None): @@ -96,20 +100,15 @@ class Service(UrlService): def check(self): self._manager = self._build_manager() + data = self._get_data() + if not data: return None - if 'idle_servers' in data: - self.module_name = 'lighttpd' - for chart in self.definitions: - if chart == 'workers': - lines = self.definitions[chart]['lines'] - lines[0] = ['idle_servers', 'idle'] - lines[1] = ['busy_servers', 'busy'] - opts = self.definitions[chart]['options'] - opts[1] = opts[1].replace('apache', 'lighttpd') - opts[4] = opts[4].replace('apache', 'lighttpd') + if LIGHTTPD_MARKER in data: + self.turn_into_lighttpd() + return True def _get_data(self): @@ -118,15 +117,44 @@ class Service(UrlService): :return: dict """ raw_data = self._get_raw_data() + if not raw_data: return None + data = dict() - for row in raw_data.split('\n'): - tmp = row.split(':') - if tmp[0] in ASSIGNMENT: - try: - data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1])) - except (IndexError, ValueError): - continue + for line in raw_data.split('\n'): + try: + parse_line(line, data) + except ValueError: + continue + return data or None + + def turn_into_lighttpd(self): + self.module_name = 'lighttpd' + for chart in self.definitions: + if chart == 'workers': + lines = self.definitions[chart]['lines'] + lines[0] = ['idle_servers', 'idle'] + lines[1] = ['busy_servers', 'busy'] + opts = self.definitions[chart]['options'] + opts[1] = opts[1].replace('apache', 'lighttpd') + opts[4] = opts[4].replace('apache', 'lighttpd') + + +def parse_line(line, data): + parts = line.split(':') + + if len(parts) != 2: + return + + key, value = parts[0], parts[1] + + if key not in ASSIGNMENT: + return + + if key in FLOAT_VALUES: + data[ASSIGNMENT[key]] = int((float(value) * 100000)) + else: + data[ASSIGNMENT[key]] = int(value) diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf index 8b606f7e0..84e12a57c 100644 --- a/collectors/python.d.plugin/apache/apache.conf +++ b/collectors/python.d.plugin/apache/apache.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. 
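The rewritten `parse_line` above stores the float-valued Apache counters (`BytesPerReq`, `ReqPerSec`, `BytesPerSec`) as integers scaled by 100000, since netdata dimensions are integral; the matching chart definitions divide by the same factor (e.g. `1024 * 100000` to additionally convert bytes to KiB). A small standalone round-trip demo of that fixed-point trick, using a subset of the patch's mapping:

```python
FLOAT_VALUES = ['BytesPerReq', 'ReqPerSec', 'BytesPerSec']
ASSIGNMENT = {'ReqPerSec': 'requests_sec'}  # subset of the patch's ASSIGNMENT


def parse_line(line, data):
    parts = line.split(':')
    if len(parts) != 2:
        return
    key, value = parts[0], parts[1]
    if key not in ASSIGNMENT:
        return
    if key in FLOAT_VALUES:
        data[ASSIGNMENT[key]] = int(float(value) * 100000)  # keep 5 decimals
    else:
        data[ASSIGNMENT[key]] = int(value)


data = {}
parse_line('ReqPerSec: 2.04719', data)
print(data)  # {'requests_sec': 204719}; the chart divisor 100000 restores 2.04719
```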
# priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, apache also supports the following: diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md index c2d7d5787..8daa36604 100644 --- a/collectors/python.d.plugin/beanstalk/README.md +++ b/collectors/python.d.plugin/beanstalk/README.md @@ -101,3 +101,5 @@ port : 11300 If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py index 1472b4e1a..ed945a781 100644 --- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py +++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py @@ -12,13 +12,18 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService from bases.loaders import safe_load -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs', - 'current_connections', 'binlog', 'uptime'] +ORDER = [ + 'cpu_usage', + 'jobs_rate', + 'connections_rate', + 'commands_rate', + 'current_tubes', + 'current_jobs', + 'current_connections', + 'binlog', + 'uptime', +] CHARTS = { 'cpu_usage': { diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf index 3b11d9192..7586ad26b 100644 --- a/collectors/python.d.plugin/beanstalk/beanstalk.conf +++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
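Throughout these config files the old `retries` option is replaced by `penalty`: instead of giving up after N failures, the framework stretches the collection interval. The comments only pin down the shape of the backoff (a step every 5 consecutive failures, capped at 10 minutes), so the formula below is a hypothetical illustration, not the plugin's actual code:

```python
PENALTY_EVERY = 5      # a step every 5 failed updates in a row
MAX_PENALTY = 10 * 60  # cap of 10 minutes, in seconds


def effective_interval(update_every, consecutive_failures):
    # Hypothetical backoff matching the comments' shape: each block of
    # five straight failures adds another multiple of the base interval.
    steps = consecutive_failures // PENALTY_EVERY
    penalty = min(steps * update_every * PENALTY_EVERY, MAX_PENALTY)
    return update_every + penalty


for fails in (0, 4, 5, 10):
    print(fails, effective_interval(1, fails))  # -> 1s, 1s, 6s, 11s
```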
@@ -68,7 +66,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # chart_cleanup: 10 # the JOB's chart cleanup interval in iterations # diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md index 688297ab3..fefe74931 100644 --- a/collectors/python.d.plugin/bind_rndc/README.md +++ b/collectors/python.d.plugin/bind_rndc/README.md @@ -58,3 +58,5 @@ local: If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py index 423232f65..7ac1bc3dc 100644 --- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py +++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py @@ -11,11 +11,15 @@ from subprocess import Popen from bases.collection import find_binary from bases.FrameworkServices.SimpleService import SimpleService -priority = 60000 -retries = 60 + update_every = 30 -ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size'] +ORDER = [ + 'name_server_statistics', + 'incoming_queries', + 'outgoing_queries', + 'named_stats_size', +] CHARTS = { 'name_server_statistics': { @@ -44,7 +48,7 @@ CHARTS = { 'lines': [ ]}, 'named_stats_size': { - 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'], + 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'], 'lines': [ ['stats_size', None, 'absolute', 1, 1 << 20] ] @@ -92,10 +96,20 @@ class Service(SimpleService): self.definitions = CHARTS self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats') self.rndc = find_binary('rndc') - self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0, - nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0, - nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0, - nms_dropped_queries=0) + self.data = dict( + nms_requests=0, + nms_responses=0, + nms_failure=0, + nms_auth=0, + nms_non_auth=0, + nms_nxrrset=0, + nms_success=0, + nms_nxdomain=0, + nms_recursion=0, + nms_duplicate=0, + nms_rejected_queries=0, + nms_dropped_queries=0, + ) def check(self): if not self.rndc: diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf index 71958ff98..3b7e9a216 100644 --- a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf +++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. 
-# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, bind_rndc also supports the following: diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md index 595bcd3c0..0f0aa1c6e 100644 --- a/collectors/python.d.plugin/boinc/README.md +++ b/collectors/python.d.plugin/boinc/README.md @@ -26,3 +26,5 @@ remote: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py index d14754c4b..e10b28cea 100644 --- a/collectors/python.d.plugin/boinc/boinc.chart.py +++ b/collectors/python.d.plugin/boinc/boinc.chart.py @@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService from third_party import boinc_client -ORDER = ['tasks', 'states', 'sched_states', 'process_states'] +ORDER = [ + 'tasks', + 'states', + 'sched_states', + 'process_states', +] CHARTS = { 'tasks': { @@ -141,14 +146,16 @@ class Service(SimpleService): def _get_data(self): if not self.is_alive(): return None + data = dict(_DATA_TEMPLATE) - results = [] + try: results = self.client.get_tasks() except socket.error: self.error('Connection is dead') self.alive = False return None + for task in results: data['total'] += 1 data[_TASK_MAP[task.state]] += 1 @@ -159,4 +166,5 @@ class Service(SimpleService): data[_PROC_MAP[task.active_task_state]] += 1 except AttributeError: pass - return data + + return data or None diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf index e59d2509d..16edf55c4 100644 --- a/collectors/python.d.plugin/boinc/boinc.conf +++ b/collectors/python.d.plugin/boinc/boinc.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
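Several charts in this patch (bind_rndc above, dnsdist further down) switch their unit labels from decimal 'MB'/'KB' to binary 'MiB'/'KiB' and scale with powers of two. In a chart line definition the last two fields are a multiplier and a divisor applied to the raw value, so a byte count divided by `1 << 20` plots as mebibytes:

```python
# A chart 'line' is [dimension_id, name, algorithm, multiplier, divisor];
# bind_rndc's stats-size dimension divides raw bytes by 1 << 20 (1048576).
line = ['stats_size', None, 'absolute', 1, 1 << 20]

raw_bytes = 5 * 1024 * 1024  # a hypothetical 5 MiB stats file
multiplier, divisor = line[3], line[4]
print(raw_bytes * multiplier / divisor)  # -> 5.0 (MiB)
```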
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, boinc also supports the following: diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md index 29dfe5d1d..1f067c61c 100644 --- a/collectors/python.d.plugin/ceph/README.md +++ b/collectors/python.d.plugin/ceph/README.md @@ -30,3 +30,5 @@ local: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py index 31c764d0f..45b52620f 100644 --- a/collectors/python.d.plugin/ceph/ceph.chart.py +++ b/collectors/python.d.plugin/ceph/ceph.chart.py @@ -9,14 +9,13 @@ try: except ImportError: CEPH = False -import os import json +import os + from bases.FrameworkServices.SimpleService import SimpleService # default module values (can be overridden per job in `config`) update_every = 10 -priority = 60000 -retries = 60 ORDER = [ 'general_usage', @@ -37,7 +36,7 @@ ORDER = [ CHARTS = { 'general_usage': { - 'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'], + 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'], 'lines': [ ['general_available', 'avail', 'absolute'], ['general_usage', 'used', 'absolute'] @@ -50,7 +49,7 @@ CHARTS = { ] }, 'general_bytes': { - 'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes', + 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes', 'area'], 'lines': [ ['general_read_bytes', 'read', 'absolute', 1, 1024], @@ -74,7 +73,7 @@ CHARTS = { ] }, 'pool_usage': { - 'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'], + 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'], 'lines': [] }, 'pool_objects': { @@ -82,11 +81,11 @@ CHARTS = { 'lines': [] }, 'pool_read_bytes': { - 'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'], + 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'], 'lines': [] }, 'pool_write_bytes': { - 'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'], + 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'], 'lines': [] }, 'pool_read_operations': { @@ -98,7 +97,7 @@ CHARTS = { 'lines': [] }, 'osd_usage': { - 'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'], + 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'], 'lines': [] }, 'osd_apply_latency': { @@ -320,7 +319,7 @@ class Service(SimpleService): return json.loads(self.cluster.mon_command(json.dumps({ 'prefix': 'osd df', 'format': 'json' - }), '')[1]) + }), '')[1].replace('-nan', '"-nan"')) def _get_osd_perf(self): """ diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf index 78ac1e251..4caabbf6d 100644 --- 
a/collectors/python.d.plugin/ceph/ceph.conf +++ b/collectors/python.d.plugin/ceph/ceph.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 10 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, ceph plugin also supports the following: diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md index 30636fe77..67ed1a059 100644 --- a/collectors/python.d.plugin/chrony/README.md +++ b/collectors/python.d.plugin/chrony/README.md @@ -29,3 +29,5 @@ local: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py index fd01d4e85..91f725001 100644 --- a/collectors/python.d.plugin/chrony/chrony.chart.py +++ b/collectors/python.d.plugin/chrony/chrony.chart.py @@ -7,11 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService # default module values (can be overridden per job in `config`) update_every = 5 -priority = 60000 -retries = 10 + +CHRONY_COMMAND = 'chronyc -n tracking' # charts order (can be overridden if you want less charts, or different order) -ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew'] +ORDER = [ + 'system', + 'offsets', + 'stratum', + 'root', + 'frequency', + 'residualfreq', + 'skew', +] CHARTS = { 'system': { @@ -77,9 +85,9 @@ class Service(ExecutableService): def __init__(self, configuration=None, name=None): ExecutableService.__init__( self, configuration=configuration, name=name) - self.command = 'chronyc -n tracking' self.order = ORDER self.definitions = CHARTS + self.command = CHRONY_COMMAND def _get_data(self): """ diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf index 9ac906b5f..fd95519b5 100644 --- a/collectors/python.d.plugin/chrony/chrony.conf +++ b/collectors/python.d.plugin/chrony/chrony.conf @@ -27,11 +27,9 @@ update_every: 5 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. 
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ update_every: 5 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, chrony also supports the following: diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md index eff8c0810..2cc353edb 100644 --- a/collectors/python.d.plugin/couchdb/README.md +++ b/collectors/python.d.plugin/couchdb/README.md @@ -33,3 +33,5 @@ local: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py index 5d6b9916f..a58694d70 100644 --- a/collectors/python.d.plugin/couchdb/couchdb.chart.py +++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py @@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict from json import loads from threading import Thread from socket import gethostbyname, gaierror + try: from queue import Queue except ImportError: @@ -15,10 +16,9 @@ except ImportError: from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) + update_every = 1 -priority = 60000 -retries = 60 + METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) @@ -109,7 +109,7 @@ ORDER = [ CHARTS = { 'activity': { - 'options': [None, 'Overall Activity', 'req/s', + 'options': [None, 'Overall Activity', 'requests/s', 'dbactivity', 'couchdb.activity', 'stacked'], 'lines': [ ['couchdb_database_reads', 'DB reads', 'incremental'], @@ -118,7 +118,7 @@ CHARTS = { ] }, 'request_methods': { - 'options': [None, 'HTTP request methods', 'req/s', + 'options': [None, 'HTTP request methods', 'requests/s', 'httptraffic', 'couchdb.request_methods', 'stacked'], 'lines': [ @@ -133,7 +133,7 @@ CHARTS = { ] }, 'response_codes': { - 'options': [None, 'HTTP response status codes', 'resp/s', + 'options': [None, 'HTTP response status codes', 'responses/s', 'httptraffic', 'couchdb.response_codes', 'stacked'], 'lines': [ @@ -151,15 +151,13 @@ CHARTS = { ] }, 'open_files': { - 'options': [None, 'Open files', 'files', - 'ops', 'couchdb.open_files', 'line'], + 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'], 'lines': [ ['couchdb_open_os_files', '# files', 'absolute'] ] }, 'active_tasks': { - 'options': [None, 'Active task breakdown', 'tasks', - 'ops', 'couchdb.active_tasks', 'stacked'], + 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'], 'lines': [ ['activetasks_indexer', 'Indexer', 'absolute'], ['activetasks_database_compaction', 'DB Compaction', 'absolute'], @@ -168,8 +166,7 @@ CHARTS = { ] }, 'replicator_jobs': { - 'options': [None, 'Replicator job breakdown', 'jobs', - 'ops', 'couchdb.replicator_jobs', 'stacked'], + 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 
'couchdb.replicator_jobs', 'stacked'], 'lines': [ ['couch_replicator_jobs_running', 'Running', 'absolute'], ['couch_replicator_jobs_pending', 'Pending', 'absolute'], @@ -179,8 +176,7 @@ CHARTS = { ] }, 'erlang_memory': { - 'options': [None, 'Erlang VM memory usage', 'bytes', - 'erlang', 'couchdb.erlang_vm_memory', 'stacked'], + 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'], 'lines': [ ['memory_atom', 'atom', 'absolute'], ['memory_binary', 'binaries', 'absolute'], @@ -191,23 +187,20 @@ CHARTS = { ] }, 'erlang_reductions': { - 'options': [None, 'Erlang reductions', 'count', - 'erlang', 'couchdb.reductions', 'line'], + 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'], 'lines': [ ['reductions', 'reductions', 'incremental'] ] }, 'erlang_proc_counts': { - 'options': [None, 'Process counts', 'count', - 'erlang', 'couchdb.proccounts', 'line'], + 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'], 'lines': [ ['os_proc_count', 'OS procs', 'absolute'], ['process_count', 'erl procs', 'absolute'] ] }, 'erlang_peak_msg_queue': { - 'options': [None, 'Peak message queue size', 'count', - 'erlang', 'couchdb.peakmsgqueue', + 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue', 'line'], 'lines': [ ['peak_msg_queue', 'peak size', 'absolute'] @@ -215,18 +208,15 @@ CHARTS = { }, # Lines for the following are added as part of check() 'db_sizes_file': { - 'options': [None, 'Database sizes (file)', 'KB', - 'perdbstats', 'couchdb.db_sizes_file', 'line'], + 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'], 'lines': [] }, 'db_sizes_external': { - 'options': [None, 'Database sizes (external)', 'KB', - 'perdbstats', 'couchdb.db_sizes_external', 'line'], + 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'], 'lines': [] }, 'db_sizes_active': { - 'options': [None, 'Database sizes (active)', 'KB', - 'perdbstats', 'couchdb.db_sizes_active', 'line'], + 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'], 'lines': [] }, 'db_doc_counts': { @@ -235,8 +225,7 @@ CHARTS = { 'lines': [] }, 'db_doc_del_counts': { - 'options': [None, 'Database # of deleted docs', 'docs', - 'perdbstats', 'couchdb_db_doc_del_count', 'line'], + 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'], 'lines': [] } } @@ -256,7 +245,7 @@ class Service(UrlService): try: self.dbs = self.configuration.get('databases').split(' ') except (KeyError, AttributeError): - self.dbs = [] + self.dbs = list() def check(self): if not (self.host and self.port): diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf index 5f6e75cff..9c68be777 100644 --- a/collectors/python.d.plugin/couchdb/couchdb.conf +++ b/collectors/python.d.plugin/couchdb/couchdb.conf @@ -28,11 +28,9 @@ update_every: 10 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. 
Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -59,7 +57,7 @@ update_every: 10 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, the couchdb plugin also supports the following: diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md index 33891d59d..f1fc1e8f2 100644 --- a/collectors/python.d.plugin/cpufreq/README.md +++ b/collectors/python.d.plugin/cpufreq/README.md @@ -1,5 +1,10 @@ # cpufreq +> THIS MODULE IS OBSOLETE. +> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT + +--- + This module shows the current CPU frequency as set by the cpufreq kernel module. @@ -28,3 +33,5 @@ If no configuration is given, module will search for cpufreq files in `/sys/devi Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf index 0890245d9..96c0884c6 100644 --- a/collectors/python.d.plugin/cpufreq/cpufreq.conf +++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md index 495169638..bb6722a11 100644 --- a/collectors/python.d.plugin/cpuidle/README.md +++ b/collectors/python.d.plugin/cpuidle/README.md @@ -9,3 +9,5 @@ It produces one stacked chart per CPU, showing the percentage of time spent in each state. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpuidle%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf index bc276fcd2..25f5fed64 100644 --- a/collectors/python.d.plugin/cpuidle/cpuidle.conf +++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. 
-# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md index 3703e8aaf..73d70d3a2 100644 --- a/collectors/python.d.plugin/dns_query_time/README.md +++ b/collectors/python.d.plugin/dns_query_time/README.md @@ -8,3 +8,5 @@ This module provides DNS query time statistics. It produces one aggregate chart or one chart per DNS server, showing the query time. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py index d3c3db788..4a5e0e108 100644 --- a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py +++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py @@ -28,10 +28,7 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) update_every = 5 -priority = 60000 -retries = 60 class Service(SimpleService): @@ -46,14 +43,14 @@ class Service(SimpleService): def check(self): if not DNS_PYTHON: - self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py') + self.error("'python-dnspython' package is needed to use dns_query_time.chart.py") return False self.timeout = self.timeout if isinstance(self.timeout, int) else 4 if not all([self.domains, self.server_list, isinstance(self.server_list, str), isinstance(self.domains, str)]): - self.error('server_list and domain_list can\'t be empty') + self.error("server_list and domain_list can't be empty") return False else: self.domains, self.server_list = self.domains.split(), self.server_list.split() @@ -129,17 +126,27 @@ def create_charts(aggregate, server_list): } } for ns in server_list: - definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']) + dim = [ + '_'.join(['ns', ns.replace('.', '_')]), + ns, + 'absolute', + ] + definitions['dns_group']['lines'].append(dim) return order, definitions else: order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list] definitions = dict() + for ns in server_list: definitions[''.join(['dns_', ns.replace('.', '_')])] = { 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'], 'lines': [ - ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'] + [ + '_'.join(['ns', ns.replace('.', '_')]), + ns, + 'absolute', + ] ] } return order, definitions diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf index d32c6db83..9c0838ee2 100644 --- a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf +++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf @@ -27,11 +27,9 @@ # If unset, the default for 
python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, dns_query_time also supports the following: diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md index b646ae27c..c7647a116 100644 --- a/collectors/python.d.plugin/dnsdist/README.md +++ b/collectors/python.d.plugin/dnsdist/README.md @@ -52,3 +52,5 @@ localhost: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py index 1aff3f803..d60858659 100644 --- a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py +++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py @@ -90,9 +90,9 @@ CHARTS = { ] }, 'servermem': { - 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'], + 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'], 'lines': [ - ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576] + ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20] ] }, 'query_latency': { diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf index aec58b8e1..324d65aaf 100644 --- a/collectors/python.d.plugin/dnsdist/dnsdist.conf +++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -#retries: 600000 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
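Stepping back to the dns_query_time reformatting above: `create_charts` derives one dimension per nameserver by mangling the server address into an identifier, since dots are not valid in dimension ids. The transformation in isolation (the address is illustrative):

```python
ns = '8.8.8.8'  # an illustrative nameserver address

dim = [
    '_'.join(['ns', ns.replace('.', '_')]),  # dimension id:  'ns_8_8_8_8'
    ns,                                      # display name:  '8.8.8.8'
    'absolute',                              # algorithm
]
print(dim)  # ['ns_8_8_8_8', '8.8.8.8', 'absolute']
```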
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md index d3f603808..b09a5d59f 100644 --- a/collectors/python.d.plugin/dockerd/README.md +++ b/collectors/python.d.plugin/dockerd/README.md @@ -3,7 +3,7 @@ Module monitor docker health metrics. **Requirement:** -* `docker` package +* `docker` package, required version 3.2.0+ Following charts are drawn: @@ -24,3 +24,5 @@ Following charts are drawn: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py index a0d3d7e65..8bd45df9e 100644 --- a/collectors/python.d.plugin/dockerd/dockerd.chart.py +++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py @@ -10,10 +10,8 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -# update_every = 1 -priority = 60000 -retries = 60 +from distutils.version import StrictVersion + # charts order (can be overridden if you want less charts, or different order) ORDER = [ @@ -24,21 +22,21 @@ ORDER = [ CHARTS = { 'running_containers': { - 'options': [None, 'Number of running containers', 'running containers', 'running containers', + 'options': [None, 'Number of running containers', 'containers', 'running containers', 'docker.running_containers', 'line'], 'lines': [ ['running_containers', 'running'] ] }, 'healthy_containers': { - 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers', + 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers', 'docker.healthy_containers', 'line'], 'lines': [ ['healthy_containers', 'healthy'] ] }, 'unhealthy_containers': { - 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers', + 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers', 'docker.unhealthy_containers', 'line'], 'lines': [ ['unhealthy_containers', 'unhealthy'] @@ -47,15 +45,26 @@ CHARTS = { } +MIN_REQUIRED_VERSION = '3.2.0' + + class Service(SimpleService): def __init__(self, configuration=None, name=None): SimpleService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS + self.client = None def check(self): if not HAS_DOCKER: - self.error('\'docker\' package is needed to use docker.chart.py') + self.error("'docker' package is needed to use dockerd module") + return False + + if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION): + self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format( + docker.__version__, + MIN_REQUIRED_VERSION, + )) return False self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock')) @@ -70,6 +79,7 @@ class Service(SimpleService): def 
get_data(self): data = dict() + data['running_containers'] = len(self.client.containers.list(sparse=True)) data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True)) data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True)) diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf index 5ef17a1f5..96c8ee0d8 100644 --- a/collectors/python.d.plugin/dockerd/dockerd.conf +++ b/collectors/python.d.plugin/dockerd/dockerd.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 10 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, dockerd plugin also supports the following: diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md index 50950ecc1..de8788b36 100644 --- a/collectors/python.d.plugin/dovecot/README.md +++ b/collectors/python.d.plugin/dovecot/README.md @@ -1,9 +1,13 @@ # dovecot This module provides statistics information from Dovecot server. + Statistics are taken from dovecot socket by executing `EXPORT global` command. More information about dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics). +The module isn't compatible with the new statistics API (v2.3), but you are still able to use the module with Dovecot v2.3 +by following the [upgrading steps](https://wiki2.dovecot.org/Upgrading/2.3). + **Requirement:** Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket.
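Since the dovecot README above only names the `EXPORT global` command, here is a minimal standalone sketch of that exchange. The socket path matches the module's default; the two-line tab-separated reply shape is an assumption based on Dovecot's old stats protocol:

```python
import socket

UNIX_SOCKET = '/var/run/dovecot/stats'  # default used by the module


def fetch_dovecot_stats():
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        s.connect(UNIX_SOCKET)
        s.sendall(b'EXPORT\tglobal\r\n')   # same request the module sends
        raw = s.recv(65536).decode()       # one read is enough for a sketch
    finally:
        s.close()
    # Assumed reply shape: a tab-separated header row followed by a value row.
    header, values = raw.splitlines()[:2]
    return dict(zip(header.split('\t'), values.split('\t')))
```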
@@ -71,3 +75,5 @@ localsocket: If no configuration is given, module will attempt to connect to dovecot using the unix socket located at `/var/run/dovecot/stats` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py index 7fee3bfac..be1fa53d5 100644 --- a/collectors/python.d.plugin/dovecot/dovecot.chart.py +++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py @@ -5,12 +5,10 @@ from bases.FrameworkServices.SocketService import SocketService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# charts order (can be overridden if you want less charts, or different order) +UNIX_SOCKET = '/var/run/dovecot/stats' + + ORDER = [ 'sessions', 'logins', @@ -53,14 +51,14 @@ CHARTS = { ] }, 'context_switches': { - 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'], + 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'], 'lines': [ ['vol_cs', 'voluntary', 'absolute'], ['invol_cs', 'involuntary', 'absolute'] ] }, 'io': { - 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'], + 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'], 'lines': [ ['disk_input', 'read', 'incremental', 1, 1024], ['disk_output', 'write', 'incremental', -1, 1024] @@ -69,8 +67,8 @@ CHARTS = { 'net': { 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'], 'lines': [ - ['read_bytes', 'read', 'incremental', 8, 1024], - ['write_bytes', 'write', 'incremental', -8, 1024] + ['read_bytes', 'read', 'incremental', 8, 1000], + ['write_bytes', 'write', 'incremental', -8, 1000] ] }, 'syscalls': { @@ -113,13 +111,12 @@ CHARTS = { class Service(SocketService): def __init__(self, configuration=None, name=None): SocketService.__init__(self, configuration=configuration, name=name) - self.request = 'EXPORT\tglobal\r\n' - self.host = None # localhost - self.port = None # 24242 - # self._keep_alive = True - self.unix_socket = '/var/run/dovecot/stats' self.order = ORDER self.definitions = CHARTS + self.host = None # localhost + self.port = None # 24242 + self.unix_socket = UNIX_SOCKET + self.request = 'EXPORT\tglobal\r\n' def _get_data(self): """ diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf index 56c394991..451dbc9ac 100644 --- a/collectors/python.d.plugin/dovecot/dovecot.conf +++ b/collectors/python.d.plugin/dovecot/dovecot.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, dovecot also supports the following: @@ -94,3 +92,7 @@ localsocket: name : 'local' socket : '/var/run/dovecot/stats' +localsocket_old: + name : 'local' + socket : '/var/run/dovecot/old-stats' + diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md index 7ce6c0b74..6d25b02d1 100644 --- a/collectors/python.d.plugin/elasticsearch/README.md +++ b/collectors/python.d.plugin/elasticsearch/README.md @@ -58,3 +58,5 @@ local: If no configuration is given, module will fail to run. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py index 3f431f6e0..f1ea03fe8 100644 --- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py +++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py @@ -159,17 +159,20 @@ ORDER = [ 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes', + 'cluster_health_pending_tasks', + 'cluster_health_flight_fetch', 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs', 'cluster_stats_store', - 'cluster_stats_indices_shards', + 'cluster_stats_indices', + 'cluster_stats_shards_total', ] CHARTS = { 'search_performance_total': { - 'options': [None, 'Queries And Fetches', 'number of', 'search performance', + 'options': [None, 'Queries And Fetches', 'events/s', 'search performance', 'elastic.search_performance_total', 'stacked'], 'lines': [ ['indices_search_query_total', 'queries', 'incremental'], @@ -177,7 +180,7 @@ CHARTS = { ] }, 'search_performance_current': { - 'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance', + 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance', 'elastic.search_performance_current', 'stacked'], 'lines': [ ['indices_search_query_current', 'queries', 'absolute'], @@ -193,14 +196,14 @@ CHARTS = { ] }, 'search_latency': { - 'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'], + 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'], 'lines': [ ['query_latency', 'query', 'absolute', 1, 1000], ['fetch_latency', 'fetch', 'absolute', 1, 1000] ] }, 'index_performance_total': { - 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of', + 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s', 'indexing performance', 'elastic.index_performance_total', 'stacked'], 'lines': [ ['indices_indexing_index_total', 'indexed', 'incremental'], @@ -225,7 +228,7 @@ CHARTS = { ] }, 'index_latency': { - 'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance', + 
'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance', 'elastic.index_latency', 'stacked'], 'lines': [ ['indexing_latency', 'indexing', 'absolute', 1, 1000], @@ -233,7 +236,7 @@ CHARTS = { ] }, 'index_translog_operations': { - 'options': [None, 'Translog Operations', 'count', 'translog', + 'options': [None, 'Translog Operations', 'operations', 'translog', 'elastic.index_translog_operations', 'area'], 'lines': [ ['indices_translog_operations', 'total', 'absolute'], @@ -241,7 +244,7 @@ CHARTS = { ] }, 'index_translog_size': { - 'options': [None, 'Translog Size', 'MB', 'translog', + 'options': [None, 'Translog Size', 'MiB', 'translog', 'elastic.index_translog_size', 'area'], 'lines': [ ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567], @@ -249,21 +252,21 @@ CHARTS = { ] }, 'index_segments_count': { - 'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments', + 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments', 'elastic.index_segments_count', 'line'], 'lines': [ ['indices_segments_count', 'segments', 'absolute'] ] }, 'index_segments_memory_writer': { - 'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments', + 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments', 'elastic.index_segments_memory_writer', 'area'], 'lines': [ ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567] ] }, 'index_segments_memory': { - 'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments', + 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments', 'elastic.index_segments_memory', 'stacked'], 'lines': [ ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048567], @@ -277,14 +280,14 @@ CHARTS = { ] }, 'jvm_mem_heap': { - 'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc', + 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc', 'elastic.jvm_heap', 'area'], 'lines': [ ['jvm_mem_heap_used_percent', 'inuse', 'absolute'] ] }, 'jvm_mem_heap_bytes': { - 'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc', + 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc', 'elastic.jvm_heap_bytes', 'area'], 'lines': [ ['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576], @@ -292,7 +295,7 @@ CHARTS = { ] }, 'jvm_buffer_pool_count': { - 'options': [None, 'JVM Buffers', 'count', 'memory usage and gc', + 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc', 'elastic.jvm_buffer_pool_count', 'line'], 'lines': [ ['jvm_buffer_pools_direct_count', 'direct', 'absolute'], @@ -300,7 +303,7 @@ CHARTS = { ] }, 'jvm_direct_buffers_memory': { - 'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc', + 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc', 'elastic.jvm_direct_buffers_memory', 'area'], 'lines': [ ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567], @@ -308,7 +311,7 @@ CHARTS = { ] }, 'jvm_mapped_buffers_memory': { - 'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc', + 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc', 'elastic.jvm_mapped_buffers_memory', 'area'], 'lines': [ ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567], @@ -316,14 +319,14 @@ CHARTS = { ] }, 'jvm_gc_count': { - 'options': [None, 'Garbage Collections', 
'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'], + 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'], 'lines': [ ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'], ['jvm_gc_collectors_old_collection_count', 'old', 'incremental'] ] }, 'jvm_gc_time': { - 'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', + 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc', 'elastic.gc_time', 'stacked'], 'lines': [ ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'], @@ -353,13 +356,13 @@ CHARTS = { ] }, 'fielddata_cache': { - 'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'], + 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'], 'lines': [ ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576] ] }, 'fielddata_evictions_tripped': { - 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events', + 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s', 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'], 'lines': [ ['indices_fielddata_evictions', 'evictions', 'incremental'], @@ -367,12 +370,24 @@ CHARTS = { ] }, 'cluster_health_nodes': { - 'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API', + 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API', 'elastic.cluster_health_nodes', 'stacked'], 'lines': [ ['number_of_nodes', 'nodes', 'absolute'], ['number_of_data_nodes', 'data_nodes', 'absolute'], + ] + }, + 'cluster_health_pending_tasks': { + 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API', + 'elastic.cluster_health_pending_tasks', 'line'], + 'lines': [ ['number_of_pending_tasks', 'pending_tasks', 'absolute'], + ] + }, + 'cluster_health_flight_fetch': { + 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API', + 'elastic.cluster_health_flight_fetch', 'line'], + 'lines': [ ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute'] ] }, @@ -420,24 +435,30 @@ CHARTS = { ] }, 'cluster_stats_docs': { - 'options': [None, 'Docs Statistics', 'count', 'cluster stats API', + 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API', 'elastic.cluster_docs', 'line'], 'lines': [ ['indices_docs_count', 'docs', 'absolute'] ] }, 'cluster_stats_store': { - 'options': [None, 'Store Statistics', 'MB', 'cluster stats API', + 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API', 'elastic.cluster_store', 'line'], 'lines': [ ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567] ] }, - 'cluster_stats_indices_shards': { - 'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API', - 'elastic.cluster_indices_shards', 'stacked'], + 'cluster_stats_indices': { + 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API', + 'elastic.cluster_indices', 'line'], 'lines': [ ['indices_count', 'indices', 'absolute'], + ] + }, + 'cluster_stats_shards_total': { + 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API', + 'elastic.cluster_shards_total', 'line'], + 'lines': [ ['indices_shards_total', 'shards', 'absolute'] ] }, @@ -450,7 +471,7 @@ CHARTS = { ] }, 'host_metrics_file_descriptors': { - 'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics', + 'options': [None, 
'Available File Descriptors In Percent', 'percentage', 'host metrics', 'elastic.host_descriptors', 'area'], 'lines': [ ['file_descriptors_used', 'used', 'absolute', 1, 10] @@ -473,9 +494,11 @@ class Service(UrlService): self.definitions = CHARTS self.host = self.configuration.get('host') self.port = self.configuration.get('port', 9200) - self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'), - host=self.host, - port=self.port) + self.url = '{scheme}://{host}:{port}'.format( + scheme=self.configuration.get('scheme', 'http'), + host=self.host, + port=self.port, + ) self.latency = dict() self.methods = list() diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf index 213843bf9..e5c97e7ef 100644 --- a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf +++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, elasticsearch plugin also supports the following: diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md index f9f314ac4..d65f8cf90 100644 --- a/collectors/python.d.plugin/example/README.md +++ b/collectors/python.d.plugin/example/README.md @@ -1 +1,5 @@ -An example python data collection module. \ No newline at end of file +# example + +An example python data collection module. 
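Putting the pieces from this patch together, a complete module in this style stays small. The sketch below is a trimmed-down reconstruction of the shipped example module (one dimension instead of several; framework import path as used throughout these hunks):

```python
from random import SystemRandom

from bases.FrameworkServices.SimpleService import SimpleService

ORDER = [
    'random',
]

CHARTS = {
    'random': {
        'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
        'lines': [
            ['random1']  # one dimension named 'random1'
        ]
    }
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.random = SystemRandom()

    def get_data(self):
        # return {dimension_id: value} for every dimension declared in CHARTS
        return {'random1': self.random.randint(0, 100)}
```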
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py index 85defa4d1..cc8c18759 100644 --- a/collectors/python.d.plugin/example/example.chart.py +++ b/collectors/python.d.plugin/example/example.chart.py @@ -7,12 +7,13 @@ from random import SystemRandom from bases.FrameworkServices.SimpleService import SimpleService -# default module values -# update_every = 4 + priority = 90000 -retries = 60 -ORDER = ['random'] +ORDER = [ + 'random', +] + CHARTS = { 'random': { 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'], diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf index e7fed9b50..3d8435173 100644 --- a/collectors/python.d.plugin/example/example.conf +++ b/collectors/python.d.plugin/example/example.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, example also supports the following: diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md index b9a62cad9..1cebb27ff 100644 --- a/collectors/python.d.plugin/exim/README.md +++ b/collectors/python.d.plugin/exim/README.md @@ -11,3 +11,5 @@ It produces only one chart: Configuration is not needed. 
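The single exim chart is fed by one number, produced by the `exim -bpc` command used in the chart.py hunk below. Outside the framework the same value can be read directly (assumes the `exim` binary is on `PATH` and the caller is allowed to query the queue):

```python
import subprocess

# `exim -bpc` prints the count of queued emails as a single integer.
def queued_emails():
    return int(subprocess.check_output(['exim', '-bpc']).decode().strip())
```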
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py index 5431dd46b..68b7b5cfb 100644 --- a/collectors/python.d.plugin/exim/exim.chart.py +++ b/collectors/python.d.plugin/exim/exim.chart.py @@ -5,13 +5,12 @@ from bases.FrameworkServices.ExecutableService import ExecutableService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['qemails'] +EXIM_COMMAND = 'exim -bpc' + +ORDER = [ + 'qemails', +] CHARTS = { 'qemails': { @@ -26,9 +25,9 @@ CHARTS = { class Service(ExecutableService): def __init__(self, configuration=None, name=None): ExecutableService.__init__(self, configuration=configuration, name=name) - self.command = 'exim -bpc' self.order = ORDER self.definitions = CHARTS + self.command = EXIM_COMMAND def _get_data(self): """ diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf index 2add7b2cb..3b7e65922 100644 --- a/collectors/python.d.plugin/exim/exim.conf +++ b/collectors/python.d.plugin/exim/exim.conf @@ -28,11 +28,9 @@ update_every: 10 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -59,7 +57,7 @@ update_every: 10 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, exim also supports the following: diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md index 2ab021965..26511986a 100644 --- a/collectors/python.d.plugin/fail2ban/README.md +++ b/collectors/python.d.plugin/fail2ban/README.md @@ -21,3 +21,5 @@ If no configuration is given, module will attempt to read log file at `/var/log/ If conf file is not found default jail is `ssh`. 
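The jail list described in the fail2ban README above is discovered by scanning the fail2ban configuration. A quick standalone check of the discovery pattern (copied from the chart.py hunk that follows) against a minimal `jail.local` snippet:

```python
import re

# Same pattern as RE_JAILS in fail2ban.chart.py below.
RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')

conf = """
[ssh]
enabled = true

[nginx-http-auth]
enabled = false
"""

print(RE_JAILS.findall(conf))  # [('ssh', 'true'), ('nginx-http-auth', 'false')]
```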
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py index 954689008..dfd2feab7 100644 --- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py +++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py @@ -35,8 +35,19 @@ def charts(jails): }, } for jail in jails: - ch[ORDER[0]]['lines'].append([jail, jail, 'incremental']) - ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute']) + dim = [ + jail, + jail, + 'incremental', + ] + ch[ORDER[0]]['lines'].append(dim) + + dim = [ + '{0}_in_jail'.format(jail), + jail, + 'absolute', + ] + ch[ORDER[1]]['lines'].append(dim) return ch @@ -46,7 +57,8 @@ RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)') # Example: # 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33 # 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27 -RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)') +# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27 +RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)') DEFAULT_JAILS = [ 'ssh', @@ -58,12 +70,10 @@ class Service(LogService): LogService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = dict() - self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log') self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local') self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/') self.exclude = self.configuration.get('exclude', str()) - self.monitoring_jails = list() self.banned_ips = defaultdict(set) self.data = dict() @@ -116,7 +126,7 @@ class Service(LogService): jail, action, ip = match['jail'], match['action'], match['ip'] - if action == 'Ban': + if action == 'Ban' or action == 'Restore Ban': self.data[jail] += 1 if ip not in self.banned_ips[jail]: self.banned_ips[jail].add(ip) @@ -126,7 +136,7 @@ class Service(LogService): self.banned_ips[jail].remove(ip) self.data['{0}_in_jail'.format(jail)] -= 1 - return self.data + return self.data def get_files_from_dir(self, dir_path, suffix): """ diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf index 60ca87231..a36436b51 100644 --- a/collectors/python.d.plugin/fail2ban/fail2ban.conf +++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails.
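And the per-line `RE_DATA` pattern above (including the new `Restore Ban` action) can be exercised the same way; the log line is one of the samples quoted in the hunk:

```python
import re

RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')

line = '2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27'
match = RE_DATA.search(line)
print(match.group('jail'), match.group('action'), match.group('ip'))
# ssh Restore Ban 217.59.246.27
```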
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, fail2ban also supports the following: diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md index e5fe88ec3..00eb50dff 100644 --- a/collectors/python.d.plugin/freeradius/README.md +++ b/collectors/python.d.plugin/freeradius/README.md @@ -68,3 +68,5 @@ To do this, create a link from the sites-enabled directory to the status file in and restart/reload your FREERADIUS server. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py index 3126831b7..8563660cc 100644 --- a/collectors/python.d.plugin/freeradius/freeradius.chart.py +++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py @@ -3,25 +3,37 @@ # Author: l2isbad # SPDX-License-Identifier: GPL-3.0-or-later -from re import findall +import re from subprocess import Popen, PIPE from bases.collection import find_binary from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 60 update_every = 15 +PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)') + RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept' -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct'] +RADCLIENT_RETRIES = 1 +RADCLIENT_TIMEOUT = 1 + +DEFAULT_HOST = 'localhost' +DEFAULT_PORT = 18121 +DEFAULT_DO_ACCT = False +DEFAULT_DO_PROXY_AUTH = False +DEFAULT_DO_PROXY_ACCT = False + +ORDER = [ + 'authentication', + 'accounting', + 'proxy-auth', + 'proxy-acct', +] CHARTS = { 'authentication': { - 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'], + 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'], 'lines': [ ['access-accepts', None, 'incremental'], ['access-rejects', None, 'incremental'], @@ -33,7 +45,7 @@ CHARTS = { ] }, 'accounting': { - 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'], + 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'], 'lines': [ ['accounting-requests', 'requests', 'incremental'], ['accounting-responses', 'responses', 'incremental'], @@ -45,7 +57,7 @@ CHARTS = { ] }, 'proxy-auth': { - 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'], + 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'], 'lines': [ ['proxy-access-accepts', 'access-accepts', 'incremental'], ['proxy-access-rejects', 'access-rejects', 'incremental'], @@ -57,7 +69,7 @@ CHARTS = { ] }, 'proxy-acct': { - 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'], + 
'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'], 'lines': [ ['proxy-accounting-requests', 'requests', 'incremental'], ['proxy-accounting-responses', 'responses', 'incremental'], @@ -71,46 +83,80 @@ CHARTS = { } +def radclient_status(radclient, retries, timeout, host, port, secret): + # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret + + return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format( + radclient=radclient, + num_retries=retries, + timeout=timeout, + host=host, + port=port, + secret=secret, + ).split() + + class Service(SimpleService): def __init__(self, configuration=None, name=None): SimpleService.__init__(self, configuration=configuration, name=name) + self.order = ORDER self.definitions = CHARTS - self.host = self.configuration.get('host', 'localhost') - self.port = self.configuration.get('port', '18121') + self.host = self.configuration.get('host', DEFAULT_HOST) + self.port = self.configuration.get('port', DEFAULT_PORT) self.secret = self.configuration.get('secret') - self.acct = self.configuration.get('acct', False) - self.proxy_auth = self.configuration.get('proxy_auth', False) - self.proxy_acct = self.configuration.get('proxy_acct', False) - chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)] - self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice] + self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT) + self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH) + self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT) self.echo = find_binary('echo') self.radclient = find_binary('radclient') self.sub_echo = [self.echo, RADIUS_MSG] - self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x', - ':'.join([self.host, self.port]), 'status', self.secret] + self.sub_radclient = radclient_status( + self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret, + ) def check(self): - if not all([self.echo, self.radclient]): - self.error('Can\'t locate "radclient" binary or binary is not executable by netdata') + if not self.radclient: + self.error("Can't locate 'radclient' binary or binary is not executable by netdata user") return False + + if not self.echo: + self.error("Can't locate 'echo' binary or binary is not executable by netdata user") + return None + if not self.secret: - self.error('"secret" not set') + self.error("'secret' isn't set") return None - if self._get_raw_data(): - return True - self.error('Request returned no data. Is server alive?') - return False + if not self.get_raw_data(): + self.error('Request returned no data. 
Is server alive?') + return False - def _get_data(self): + if not self.do_acct: + self.order.remove('accounting') + + if not self.do_proxy_auth: + self.order.remove('proxy-auth') + + if not self.do_proxy_acct: + self.order.remove('proxy-acct') + + return True + + def get_data(self): """ Format data received from shell command :return: dict """ - result = self._get_raw_data() - return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)]) + result = self.get_raw_data() - def _get_raw_data(self): + if not result: + return None + + return dict( + (key.lower(), value) for key, value in PARSER.findall(result) + ) + + def get_raw_data(self): """ The following code is equivalent to 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept" @@ -124,6 +170,8 @@ class Service(SimpleService): raw_result = process_rad.communicate()[0] except OSError: return None + if process_rad.returncode == 0: return raw_result.decode() + return None diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf index 3336d4c49..74b273776 100644 --- a/collectors/python.d.plugin/freeradius/freeradius.conf +++ b/collectors/python.d.plugin/freeradius/freeradius.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, freeradius also supports the following: diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md index e3356e1f1..3942a7be8 100644 --- a/collectors/python.d.plugin/go_expvar/README.md +++ b/collectors/python.d.plugin/go_expvar/README.md @@ -169,7 +169,6 @@ and its base `UrlService` class. These are: update_every: 1 # the job's data collection frequency priority: 60000 # the job's order on the dashboard - retries: 60 # the job's number of restoration attempts user: admin # use when the expvar endpoint is protected by HTTP Basic Auth password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth @@ -274,3 +273,5 @@ The images below show how the final charts look in netdata.
![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py index 76e8b72ec..e82a87761 100644 --- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py +++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py @@ -6,17 +6,24 @@ from __future__ import division import json +from collections import namedtuple + from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 +MEMSTATS_ORDER = [ + 'memstats_heap', + 'memstats_stack', + 'memstats_mspan', + 'memstats_mcache', + 'memstats_sys', + 'memstats_live_objects', + 'memstats_gc_pauses', +] MEMSTATS_CHARTS = { 'memstats_heap': { - 'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats', + 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats', 'expvar.memstats.heap', 'line'], 'lines': [ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024], @@ -24,21 +31,21 @@ MEMSTATS_CHARTS = { ] }, 'memstats_stack': { - 'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats', + 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats', 'expvar.memstats.stack', 'line'], 'lines': [ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024] ] }, 'memstats_mspan': { - 'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats', + 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats', 'expvar.memstats.mspan', 'line'], 'lines': [ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024] ] }, 'memstats_mcache': { - 'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats', + 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats', 'expvar.memstats.mcache', 'line'], 'lines': [ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024] @@ -52,7 +59,7 @@ MEMSTATS_CHARTS = { ] }, 'memstats_sys': { - 'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats', + 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats', 'expvar.memstats.sys', 'line'], 'lines': [ ['memstats_sys', 'sys', 'absolute', 1, 1024] @@ -67,8 +74,14 @@ MEMSTATS_CHARTS = { } } -MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache', - 'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses'] +EXPVAR = namedtuple( + "EXPVAR", + [ + "key", + "type", + "id", + ] +) def flatten(d, top='', sep='.'): @@ -85,7 +98,6 @@ def flatten(d, top='', sep='.'): class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - # if memstats collection is enabled, add the charts and their order if self.configuration.get('collect_memstats'): self.definitions = dict(MEMSTATS_CHARTS) @@ -118,7 +130,7 @@ class Service(UrlService): def _parse_extra_charts_config(self, extra_charts_config): # a place to store the expvar keys and their types - self.expvars = dict() 
+ self.expvars = list() for chart in extra_charts_config: @@ -156,11 +168,8 @@ class Service(UrlService): self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type)) continue - if ev_key in self.expvars: - self.info('Duplicate expvar key {0}: skipping line.'.format(ev_key)) - continue - - self.expvars[ev_key] = (ev_type, line_id) + # self.expvars[ev_key] = (ev_type, line_id) + self.expvars.append(EXPVAR(ev_key, ev_type, line_id)) chart_dict['lines'].append( [ @@ -197,21 +206,21 @@ class Service(UrlService): # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict. del (data['memstats']) flattened = flatten(data) - for k, v in flattened.items(): - ev = self.expvars.get(k) - if not ev: - # expvar is not defined in config, skip it + + for ev in self.expvars: + v = flattened.get(ev.key) + + if v is None: continue + try: - key_type, line_id = ev - if key_type == 'int': - expvars[line_id] = int(v) - elif key_type == 'float': - # if the value type is float, multiply it by 1000 and set line divisor to 1000 - expvars[line_id] = float(v) * 100 + if ev.type == 'int': + expvars[ev.id] = int(v) + elif ev.type == 'float': + expvars[ev.id] = float(v) * 100 except ValueError: - self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(k, key_type)) - del self.expvars[k] + self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type)) + return None return expvars diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf index af89158aa..4b821cde9 100644 --- a/collectors/python.d.plugin/go_expvar/go_expvar.conf +++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -53,7 +51,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, this plugin also supports the following: diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md index 4bff25670..4bd80a23d 100644 --- a/collectors/python.d.plugin/haproxy/README.md +++ b/collectors/python.d.plugin/haproxy/README.md @@ -47,3 +47,5 @@ via_socket: If no configuration is given, module will fail to run. 
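The haproxy module below can read these numbers either over HTTP or from the local admin socket (its `__init__` switches on whether `socket` is in the configuration). A standalone sketch of the socket path; the socket location is an assumption, and `show stat` is HAProxy's standard command whose reply is CSV with a header row beginning `# pxname`:

```python
import csv
import socket


def show_stat(path='/var/run/haproxy/admin.sock'):  # path is an assumption
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(path)
    s.sendall(b'show stat\n')
    chunks = []
    while True:                       # read until HAProxy closes the socket
        chunk = s.recv(4096)
        if not chunk:
            break
        chunks.append(chunk)
    s.close()
    raw = b''.join(chunks).decode().lstrip('# ')  # drop the '# ' before pxname
    return list(csv.DictReader(raw.splitlines()))


for row in show_stat():
    print(row['pxname'], row['svname'], row['scur'])
```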
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py index a46689f50..d97d28d2b 100644 --- a/collectors/python.d.plugin/haproxy/haproxy.chart.py +++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py @@ -14,12 +14,6 @@ except ImportError: from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.UrlService import UrlService - -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - # charts order (can be overridden if you want less charts, or different order) ORDER = [ 'fbin', @@ -56,11 +50,11 @@ ORDER = [ CHARTS = { 'fbin': { - 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'], + 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'], 'lines': [] }, 'fbout': { - 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'], + 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'], 'lines': [] }, 'fscur': { @@ -101,11 +95,11 @@ CHARTS = { 'lines': [] }, 'bbin': { - 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'], + 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'], 'lines': [] }, 'bbout': { - 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'], + 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'], 'lines': [] }, 'bscur': { @@ -146,41 +140,39 @@ CHARTS = { 'lines': [] }, 'bqtime': { - 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend', + 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend', 'haproxy_b.qtime', 'line'], 'lines': [] }, 'bctime': { - 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend', + 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend', 'haproxy_b.ctime', 'line'], 'lines': [] }, 'brtime': { - 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend', + 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend', 'haproxy_b.rtime', 'line'], 'lines': [] }, 'bttime': { - 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend', + 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend', 'haproxy_b.ttime', 'line'], 'lines': [] }, 'health_sdown': { - 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', - 'haproxy_hs.down', 'line'], + 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'], 'lines': [] }, 'health_sup': { - 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', - 'haproxy_hs.up', 'line'], + 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'], 'lines': [] }, 'health_bdown': { - 'options': [None, 'Is Backend Alive? 
1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'], + 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'], 'lines': [] }, 'health_idle': { - 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'], + 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'], 'lines': [ ['idle', None, 'absolute'] ] @@ -214,6 +206,7 @@ REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'), socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)')) +# TODO: the code is unreadable class Service(UrlService, SocketService): def __init__(self, configuration=None, name=None): if 'socket' in configuration: diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf index a40dd76a5..10a0df3c3 100644 --- a/collectors/python.d.plugin/haproxy/haproxy.conf +++ b/collectors/python.d.plugin/haproxy/haproxy.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, haproxy also supports the following: diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md index 1236186a5..d9f254d51 100644 --- a/collectors/python.d.plugin/hddtemp/README.md +++ b/collectors/python.d.plugin/hddtemp/README.md @@ -20,3 +20,5 @@ port: 7634 If no configuration is given, module will attempt to connect to hddtemp daemon at `127.0.0.1:7634` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py index dea701171..810aaacc9 100644 --- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py +++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py @@ -12,7 +12,9 @@ from copy import deepcopy from bases.FrameworkServices.SocketService import SocketService -ORDER = ['temperatures'] +ORDER = [ + 'temperatures', +] CHARTS = { 'temperatures': { @@ -39,11 +41,11 @@ class Service(SocketService): SocketService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = deepcopy(CHARTS) + self.do_only = self.configuration.get('devices') self._keep_alive = False self.request = "" self.host = "127.0.0.1" self.port = 7634 - self.do_only = self.configuration.get('devices') def
get_disks(self): r = self._get_raw_data() @@ -89,8 +91,7 @@ class Service(SocketService): return False for d in disks: - n = d.id if d.id.startswith('sd') else d.name - dim = [d.id, n] + dim = [d.id] self.definitions['temperatures']['lines'].append(dim) return True diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf index 9165798a2..b2d7aef63 100644 --- a/collectors/python.d.plugin/hddtemp/hddtemp.conf +++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, hddtemp also supports the following: diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md index 759107663..4cd024d12 100644 --- a/collectors/python.d.plugin/httpcheck/README.md +++ b/collectors/python.d.plugin/httpcheck/README.md @@ -39,3 +39,5 @@ server: response time is low and should be used as reference only. 
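The response-time caveat above is easy to reproduce outside the module: the charted value is plain wall-clock time measured around the request. A minimal sketch (target URL is an assumption):

```python
import time

try:
    from urllib.request import urlopen  # python3
except ImportError:
    from urllib2 import urlopen         # python2

start = time.time()
urlopen('http://127.0.0.1').read()      # target URL is an assumption
elapsed_ms = (time.time() - start) * 1000.0
print('response time: %.2f ms' % elapsed_ms)
```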
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py index f046f33c0..fd51370da 100644 --- a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py +++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py @@ -16,7 +16,6 @@ from bases.FrameworkServices.UrlService import UrlService # default module values (can be overridden per job in `config`) update_every = 3 priority = 60000 -retries = 60 # Response HTTP_RESPONSE_TIME = 'time' @@ -29,11 +28,15 @@ HTTP_BAD_STATUS = 'bad_status' HTTP_TIMEOUT = 'timeout' HTTP_NO_CONNECTION = 'no_connection' -ORDER = ['response_time', 'response_length', 'status'] +ORDER = [ + 'response_time', + 'response_length', + 'status', +] CHARTS = { 'response_time': { - 'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'], + 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'], 'lines': [ [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000] ] @@ -60,12 +63,12 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS pattern = self.configuration.get('regex') self.regex = re.compile(pattern) if pattern else None self.status_codes_accepted = self.configuration.get('status_accepted', [200]) self.follow_redirect = self.configuration.get('redirect', True) - self.order = ORDER - self.definitions = CHARTS def _get_data(self): """ diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf index bd21b5af8..1e1dd0205 100644 --- a/collectors/python.d.plugin/httpcheck/httpcheck.conf +++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf @@ -27,6 +27,10 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes + # chart_cleanup sets the default chart cleanup interval in iterations. # A chart is marked as obsolete if it has not been updated # 'chart_cleanup' iterations in a row. @@ -61,7 +65,7 @@ chart_cleanup: 0 # # JOBs sharing a name are mutually exclusive # update_every: 3 # [optional] the JOB's data collection frequency # priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5s) # url: 'http[s]://host-ip-or-dns[:port][path]' # # [required] the remote host url to connect to. 
If [:port] is missing, it defaults to 80 diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md index a28a6c398..068da6a06 100644 --- a/collectors/python.d.plugin/icecast/README.md +++ b/collectors/python.d.plugin/icecast/README.md @@ -24,3 +24,5 @@ remote: Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py index d8813f9ba..40eaf89b9 100644 --- a/collectors/python.d.plugin/icecast/icecast.chart.py +++ b/collectors/python.d.plugin/icecast/icecast.chart.py @@ -8,11 +8,9 @@ import json from bases.FrameworkServices.UrlService import UrlService -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['listeners'] +ORDER = [ + 'listeners', +] CHARTS = { 'listeners': { diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf index a900d06d3..a33074aef 100644 --- a/collectors/python.d.plugin/icecast/icecast.conf +++ b/collectors/python.d.plugin/icecast/icecast.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
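For the icecast module above, the listeners count comes from Icecast's JSON status endpoint named in the README (default URL taken from there). A standalone sketch; note that, in my understanding of the Icecast JSON, a server with a single mount returns `source` as an object rather than a list, so the sketch normalises it:

```python
import json

try:
    from urllib.request import urlopen  # python3
except ImportError:
    from urllib2 import urlopen         # python2

# URL taken from the README default above.
stats = json.loads(urlopen('http://localhost:8443/status-json.xsl').read().decode())

sources = stats['icestats'].get('source', [])
if isinstance(sources, dict):  # a single mount comes back as an object
    sources = [sources]

for source in sources:
    print(source.get('server_name'), source.get('listeners'))
```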
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, icecast also supports the following: diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md index a30649a5f..a83920370 100644 --- a/collectors/python.d.plugin/ipfs/README.md +++ b/collectors/python.d.plugin/ipfs/README.md @@ -23,3 +23,5 @@ localhost: --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py index 3f6794e48..8c89b4be1 100644 --- a/collectors/python.d.plugin/ipfs/ipfs.chart.py +++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py @@ -7,25 +7,17 @@ import json from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost:5001' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects'] +ORDER = [ + 'bandwidth', + 'peers', + 'repo_size', + 'repo_objects', +] CHARTS = { 'bandwidth': { - 'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'], + 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'], 'lines': [ ['in', None, 'absolute', 8, 1000], ['out', None, 'absolute', -8, 1000] @@ -38,10 +30,10 @@ CHARTS = { ] }, 'repo_size': { - 'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'], + 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'], 'lines': [ - ['avail', None, 'absolute', 1, 1e9], - ['size', None, 'absolute', 1, 1e9], + ['avail', None, 'absolute', 1, 1 << 30], + ['size', None, 'absolute', 1, 1 << 30], ] }, 'repo_objects': { @@ -69,11 +61,11 @@ SI_zeroes = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.baseurl = self.configuration.get('url', 'http://localhost:5001') self.order = ORDER self.definitions = CHARTS - self.__storage_max = None + self.baseurl = self.configuration.get('url', 'http://localhost:5001') self.do_pinapi = self.configuration.get('pinapi') + self.__storage_max = None def _get_json(self, sub_url): """ @@ -135,6 +127,6 @@ class Service(UrlService): for new_key, orig_key, xmute in cfg[suburl]: try: r[new_key] = xmute(in_json[orig_key]) - except Exception: - continue + except Exception as error: + self.debug(error) return r or None diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf index e3df0f6bb..c7e186487 100644 --- a/collectors/python.d.plugin/ipfs/ipfs.conf +++ b/collectors/python.d.plugin/ipfs/ipfs.conf @@ -27,11 +27,9 @@ # If unset, the default for 
python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, ipfs also supports the following: diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md index 334d86e33..67547e2f6 100644 --- a/collectors/python.d.plugin/isc_dhcpd/README.md +++ b/collectors/python.d.plugin/isc_dhcpd/README.md @@ -32,3 +32,5 @@ In case of python2 you need to install `py2-ipaddress` to make the plugin work. The module will not work if no configuration is given. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py index a9f274949..bbe7a9369 100644 --- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py +++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py @@ -19,14 +19,16 @@ from copy import deepcopy from bases.FrameworkServices.SimpleService import SimpleService -priority = 60000 -retries = 60 -ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total'] +ORDER = [ + 'pools_utilization', + 'pools_active_leases', + 'leases_total', +] CHARTS = { 'pools_utilization': { - 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'], + 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'], 'lines': [] }, 'pools_active_leases': { @@ -120,7 +122,6 @@ class Service(SimpleService): SimpleService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = deepcopy(CHARTS) - lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases') self.dhcpd_leases = DhcpdLeasesFile(path=lease_path) self.pools = list() @@ -131,7 +132,7 @@ class Service(SimpleService): def check(self): if not HAVE_IP_ADDRESS: - self.error("'python-ipaddress' module is needed") + self.error("'python-ipaddress' package is needed") return False if not self.dhcpd_leases.is_valid(): @@ -190,6 +191,17 @@ class Service(SimpleService): def create_charts(self): for pool in self.pools: - self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name, - 'absolute', 1, 100]) - self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name]) + dim = [ + pool.id + '_utilization', + pool.name, + 'absolute', + 1, + 100, + ] +
self.definitions['pools_utilization']['lines'].append(dim) + + dim = [ + pool.id + '_active_leases', + pool.name, + ] + self.definitions['pools_active_leases']['lines'].append(dim) diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf index 4a4c4a5e3..8dcb5082f 100644 --- a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf +++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, isc_dhcpd supports the following: diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md index 5cfbe41ce..f5b05d199 100644 --- a/collectors/python.d.plugin/linux_power_supply/README.md +++ b/collectors/python.d.plugin/linux_power_supply/README.md @@ -1,4 +1,9 @@ -# linux\_power\_supply +# Linux power supply + +> THIS MODULE IS OBSOLETE. +> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT + +--- + This module monitors various metrics reported by power supply drivers on Linux. This allows tracking and alerting on things like remaining @@ -65,3 +70,5 @@ the corresponding `min` or `empty`, which will then always read as zero. This way, alerts which match on these will still work. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flinux_power_supply%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf index 3cb610f7f..96eeef44f 100644 --- a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf +++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_everye -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails.
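The `create_charts()` rewrite in isc_dhcpd a few hunks above shows the common python.d pattern of declaring a chart with an empty `'lines'` list and appending one dimension per discovered resource. A self-contained sketch of that pattern follows; the `Pool` class and the pool values are hypothetical stand-ins for the module's real pool objects.

```python
from copy import deepcopy

CHARTS = {
    'pools_utilization': {
        'options': [None, 'Pools Utilization', 'percentage', 'utilization',
                    'isc_dhcpd.utilization', 'line'],
        'lines': [],  # filled at runtime, one dimension per pool
    },
}


class Pool:  # hypothetical stand-in for the module's pool objects
    def __init__(self, pool_id, name):
        self.id, self.name = pool_id, name


definitions = deepcopy(CHARTS)  # same reason the module deepcopies CHARTS:
                                # jobs must not share mutated definitions
for pool in [Pool('192_168_0_0_24', 'office'), Pool('10_0_0_0_8', 'lab')]:
    # dimension format: [id, name, algorithm, multiplier, divisor]
    dim = [pool.id + '_utilization', pool.name, 'absolute', 1, 100]
    definitions['pools_utilization']['lines'].append(dim)

print(definitions['pools_utilization']['lines'])
```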
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # In addition to the above parameters, linux_power_supply also supports diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md index d1482f33c..88b672533 100644 --- a/collectors/python.d.plugin/litespeed/README.md +++ b/collectors/python.d.plugin/litespeed/README.md @@ -45,3 +45,5 @@ local: If no configuration is given, module will use "/tmp/lshttpd/". --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py index efdc6869c..9da94213e 100644 --- a/collectors/python.d.plugin/litespeed/litespeed.chart.py +++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py @@ -16,11 +16,15 @@ update_every = 10 # charts order (can be overridden if you want less charts, or different order) ORDER = [ - 'net_throughput_http', 'net_throughput_https', # net throughput - 'connections_http', 'connections_https', # connections - 'requests', 'requests_processing', # requests - 'pub_cache_hits', 'private_cache_hits', # cache - 'static_hits' # static + 'net_throughput_http', # net throughput + 'net_throughput_https', # net throughput + 'connections_http', # connections + 'connections_https', # connections + 'requests', # requests + 'requests_processing', # requests + 'pub_cache_hits', # cache + 'private_cache_hits', # cache + 'static_hits', # static ] CHARTS = { diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf index 17d0f690e..a326e184e 100644 --- a/collectors/python.d.plugin/litespeed/litespeed.conf +++ b/collectors/python.d.plugin/litespeed/litespeed.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
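Several modules in this patch (litespeed above, memcached and monit below) reformat their `ORDER` lists to one entry per line. `ORDER` is what the in-code comment calls the overridable chart order; a simplified sketch of how a framework-style loop would consume it (not the real python.d.plugin internals):

```python
ORDER = [
    'net_throughput_http',
    'net_throughput_https',
    'requests',
]

CHARTS = {
    'net_throughput_http': {'options': [], 'lines': []},
    'net_throughput_https': {'options': [], 'lines': []},
    'requests': {'options': [], 'lines': []},
    'static_hits': {'options': [], 'lines': []},  # not in ORDER: never created
}

for name in ORDER:  # charts are created in exactly this sequence
    print('creating chart:', name, CHARTS[name])
```

Trimming `ORDER` is therefore the documented way to get "less charts, or different order" without touching `CHARTS`.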
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, litespeed also supports the following: diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md index 8f8670d4a..c35630c8f 100644 --- a/collectors/python.d.plugin/logind/README.md +++ b/collectors/python.d.plugin/logind/README.md @@ -52,3 +52,5 @@ is currently disabled by default, and needs to be explicitly enabled in `/etc/netdata/python.d.conf` before it will run. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py index bfc486c7f..708668649 100644 --- a/collectors/python.d.plugin/logind/logind.chart.py +++ b/collectors/python.d.plugin/logind/logind.chart.py @@ -8,7 +8,13 @@ from bases.FrameworkServices.ExecutableService import ExecutableService priority = 59999 disabled_by_default = True -ORDER = ['sessions', 'users', 'seats'] +LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend' + +ORDER = [ + 'sessions', + 'users', + 'seats', +] CHARTS = { 'sessions': { @@ -39,9 +45,9 @@ CHARTS = { class Service(ExecutableService): def __init__(self, configuration=None, name=None): ExecutableService.__init__(self, configuration=configuration, name=name) - self.command = 'loginctl list-sessions --no-legend' self.order = ORDER self.definitions = CHARTS + self.command = LOGINCTL_COMMAND def _get_data(self): ret = { diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf index 0623493de..01a859d21 100644 --- a/collectors/python.d.plugin/logind/logind.conf +++ b/collectors/python.d.plugin/logind/logind.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,5 +56,5 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md index 1ff8f7dab..f88346eec 100644 --- a/collectors/python.d.plugin/mdstat/README.md +++ b/collectors/python.d.plugin/mdstat/README.md @@ -1,5 +1,10 @@ # mdstat +> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT + +--- + Module monitors /proc/mdstat It produces: @@ -24,3 +29,5 @@ It produces: No configuration is needed. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmdstat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf index 66a2f153c..c72b63835 100644 --- a/collectors/python.d.plugin/mdstat/mdstat.conf +++ b/collectors/python.d.plugin/mdstat/mdstat.conf @@ -19,11 +19,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md index d288a6353..e96015ddb 100644 --- a/collectors/python.d.plugin/megacli/README.md +++ b/collectors/python.d.plugin/megacli/README.md @@ -46,3 +46,5 @@ do_battery: yes ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py index 41a1079f6..e1a05e416 100644 --- a/collectors/python.d.plugin/megacli/megacli.chart.py +++ b/collectors/python.d.plugin/megacli/megacli.chart.py @@ -66,7 +66,7 @@ def battery_charts(bats): charts.update( { 'bbu_{0}_relative_charge'.format(b.id): { - 'options': [None, 'Relative State of Charge', '%', 'battery', + 'options': [None, 'Relative State of Charge', 'percentage', 'battery', 'megacli.bbu_relative_charge', 'line'], 'lines': [ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)], @@ -180,8 +180,8 @@ class Service(ExecutableService): ExecutableService.__init__(self, configuration=configuration, name=name) self.order = list() self.definitions = dict() - self.megacli = Megacli() self.do_battery = self.configuration.get('do_battery') + self.megacli = Megacli() def check_sudo(self): err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True) diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf index 73afb2f7f..1af4292d9 100644 --- a/collectors/python.d.plugin/megacli/megacli.conf +++ b/collectors/python.d.plugin/megacli/megacli.conf @@ -19,11 +19,9 @@ # If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -50,7 +48,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, megacli also supports the following: diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md index 3521c109d..98627c4a3 100644 --- a/collectors/python.d.plugin/memcached/README.md +++ b/collectors/python.d.plugin/memcached/README.md @@ -67,3 +67,5 @@ localtcpip: If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py index 3c310ec69..9803dbb09 100644 --- a/collectors/python.d.plugin/memcached/memcached.chart.py +++ b/collectors/python.d.plugin/memcached/memcached.chart.py @@ -5,37 +5,37 @@ from bases.FrameworkServices.SocketService import SocketService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'host': 'localhost', -# 'port': 11211, -# 'unix_socket': None -# }} - -ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed', - 'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate'] + +ORDER = [ + 'cache', + 'net', + 'connections', + 'items', + 'evicted_reclaimed', + 'get', + 'get_rate', + 'set_rate', + 'cas', + 'delete', + 'increment', + 'decrement', + 'touch', + 'touch_rate', +] CHARTS = { 'cache': { - 'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'], + 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'], 'lines': [ - ['avail', 'available', 'absolute', 1, 1048576], - ['used', 'used', 'absolute', 1, 1048576] + ['avail', 'available', 'absolute', 1, 1 << 20], + ['used', 'used', 'absolute', 1, 1 << 20] ] }, 'net': { 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'], 'lines': [ - ['bytes_read', 'in', 'incremental', 8, 1024], - ['bytes_written', 'out', 'incremental', -8, 1024] + ['bytes_read', 'in', 'incremental', 8, 1000], + ['bytes_written', 'out', 'incremental', -8, 1000], ] }, 'connections': { @@ -127,13 +127,13 @@ CHARTS = { class Service(SocketService): def __init__(self, configuration=None, name=None): SocketService.__init__(self, configuration=configuration, name=name) + self.order = 
ORDER + self.definitions = CHARTS self.request = 'stats\r\n' self.host = 'localhost' self.port = 11211 self._keep_alive = True self.unix_socket = None - self.order = ORDER - self.definitions = CHARTS def _get_data(self): """ diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf index 85c3daf65..3286b4623 100644 --- a/collectors/python.d.plugin/memcached/memcached.conf +++ b/collectors/python.d.plugin/memcached/memcached.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, memcached also supports the following: diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md index 8e5f652c5..ac8930dd2 100644 --- a/collectors/python.d.plugin/mongodb/README.md +++ b/collectors/python.d.plugin/mongodb/README.md @@ -121,6 +121,33 @@ Number of charts depends on mongodb version, storage engine and other features ( 26. **Replication set member heartbeat latency** * member (time when last heartbeat was received from replica set member) +### prerequisite +Create a read-only user for netdata in the admin database. + +1. Authenticate as the admin user. + +``` +use admin +db.auth("admin", "") +``` + +2. Create a user. + +``` +# MongoDB 2.x. +db.addUser("netdata", "", true) + +# MongoDB 3.x or higher.
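+# ('read' on admin/local plus 'clusterMonitor' is meant as a least-privilege
+# set: enough for serverStatus and replica-set statistics, nothing more.)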
+db.createUser({ + "user":"netdata", + "pwd": "", + "roles" : [ + {role: 'read', db: 'admin' }, + {role: 'clusterMonitor', db: 'admin'}, + {role: 'read', db: 'local' } + ] +}) +``` ### configuration @@ -139,3 +166,5 @@ local: If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py index 10344342d..92740ff86 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.chart.py +++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py @@ -16,10 +16,6 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 REPL_SET_STATES = [ ('1', 'primary'), @@ -209,21 +205,21 @@ CHARTS = { ] }, 'journaling_volume': { - 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance', + 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance', 'mongodb.journaling_volume', 'line'], 'lines': [ ['journaledMB', 'volume', 'absolute', 1, 100] ] }, 'background_flush_average': { - 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance', + 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance', 'mongodb.background_flush_average', 'line'], 'lines': [ ['average_ms', 'time', 'absolute', 1, 100] ] }, 'background_flush_last': { - 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance', + 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance', 'mongodb.background_flush_last', 'line'], 'lines': [ ['last_ms', 'time', 'absolute', 1, 100] @@ -269,7 +265,7 @@ CHARTS = { ] }, 'memory': { - 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'], + 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'], 'lines': [ ['virtual', None, 'absolute', 1, 1], ['resident', None, 'absolute', 1, 1], @@ -313,7 +309,7 @@ CHARTS = { }, 'wiredtiger_cache': { 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes', - 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'], + 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'], 'lines': [ ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000], ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000] @@ -333,14 +329,14 @@ CHARTS = { 'lines': [] }, 'tcmalloc_generic': { - 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'], + 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'], 'lines': [ - ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576], - ['heap_size', 'heap_size', 'absolute', 1, 1048576] + ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20], + ['heap_size', 'heap_size', 'absolute', 1, 1 << 20] ] }, 'tcmalloc_metrics': { - 'options': [None, 'Tcmalloc 
metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'], + 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'], 'lines': [ ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024], ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024], diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf index 62faef68d..f69acac79 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.conf +++ b/collectors/python.d.plugin/mongodb/mongodb.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, mongodb also supports the following: diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md index 6d10240c9..0f69aff29 100644 --- a/collectors/python.d.plugin/monit/README.md +++ b/collectors/python.d.plugin/monit/README.md @@ -31,3 +31,5 @@ local: If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`. 
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py index 51943c0e1..3ac0032c5 100644 --- a/collectors/python.d.plugin/monit/monit.chart.py +++ b/collectors/python.d.plugin/monit/monit.chart.py @@ -6,13 +6,20 @@ import xml.etree.ElementTree as ET from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 # see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h) -MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net'] +MONIT_SERVICE_NAMES = [ + 'Filesystem', + 'Directory', + 'File', + 'Process', + 'Host', + 'System', + 'Fifo', + 'Program', + 'Net', +] + DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8] # charts order (can be overridden if you want less charts, or different order) @@ -90,10 +97,10 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - base_url = self.configuration.get('url', 'http://localhost:2812') - self.url = '{0}/_status?format=xml&level=full'.format(base_url) self.order = ORDER self.definitions = CHARTS + base_url = self.configuration.get('url', 'http://localhost:2812') + self.url = '{0}/_status?format=xml&level=full'.format(base_url) def parse(self, data): try: @@ -105,15 +112,19 @@ class Service(UrlService): def check(self): self._manager = self._build_manager() + raw_data = self._get_raw_data() if not raw_data: return None + return bool(self.parse(raw_data)) def _get_data(self): raw_data = self._get_raw_data() + if not raw_data: return None + xml = self.parse(raw_data) if not xml: return None @@ -121,6 +132,7 @@ class Service(UrlService): data = {} for service_id in DEFAULT_SERVICES_IDS: service_category = MONIT_SERVICE_NAMES[service_id].lower() + if service_category == 'system': self.debug("Skipping service from 'System' category, because it's useless in graphs") continue diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf index f9c26dbc3..9a3fb6938 100644 --- a/collectors/python.d.plugin/monit/monit.conf +++ b/collectors/python.d.plugin/monit/monit.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
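The monit changes above separate "can I fetch?" from "can I parse?", so `check()` fails fast on malformed XML instead of failing later during collection. A condensed sketch of that flow, with the framework's `_get_raw_data()` stubbed out for illustration:

```python
import xml.etree.ElementTree as ET


class MonitLikeService:
    def _get_raw_data(self):
        # stub; the real UrlService performs the HTTP request
        return '<monit><service>...</service></monit>'

    def parse(self, data):
        try:
            return ET.fromstring(data)
        except ET.ParseError as error:
            print(error)
            return None

    def check(self):
        raw_data = self._get_raw_data()
        if not raw_data:
            return None                        # nothing fetched: check fails
        return bool(self.parse(raw_data))      # fetched but bad XML also fails


print(MonitLikeService().check())  # True
```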
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, this plugin also supports the following: diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md index e38098e7e..498493a3f 100644 --- a/collectors/python.d.plugin/mysql/README.md +++ b/collectors/python.d.plugin/mysql/README.md @@ -65,7 +65,6 @@ Here is an example for 3 servers: ```yaml update_every : 10 priority : 90100 -retries : 5 local: 'my.cnf' : '/etc/mysql/my.cnf' @@ -82,9 +81,10 @@ remote: pass : 'bla' host : 'example.org' port : 9000 - retries : 20 ``` If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py index c4d1e8b3a..20d32f81b 100644 --- a/collectors/python.d.plugin/mysql/mysql.chart.py +++ b/collectors/python.d.plugin/mysql/mysql.chart.py @@ -6,10 +6,6 @@ from bases.FrameworkServices.MySQLService import MySQLService -# default module values (can be overridden per job in `config`) -# update_every = 3 -priority = 60000 -retries = 60 # query executed on MySQL server QUERY_GLOBAL = 'SHOW GLOBAL STATUS;' @@ -172,16 +168,19 @@ ORDER = [ 'binlog_cache', 'binlog_stmt_cache', 'threads', + 'threads_creation_rate', 'thread_cache_misses', 'innodb_io', 'innodb_io_ops', 'innodb_io_pending_ops', 'innodb_log', 'innodb_os_log', + 'innodb_os_log_fsync_writes', 'innodb_os_log_io', 'innodb_cur_row_lock', 'innodb_rows', 'innodb_buffer_pool_pages', + 'innodb_buffer_pool_flush_pages_requests', 'innodb_buffer_pool_bytes', 'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', @@ -206,14 +205,14 @@ ORDER = [ CHARTS = { 'net': { - 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'], + 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'], 'lines': [ - ['Bytes_received', 'in', 'incremental', 8, 1024], - ['Bytes_sent', 'out', 'incremental', -8, 1024] + ['Bytes_received', 'in', 'incremental', 8, 1000], + ['Bytes_sent', 'out', 'incremental', -8, 1000] ] }, 'queries': { - 'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'], + 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'], 'lines': [ ['Queries', 'queries', 'incremental'], ['Questions', 'questions', 'incremental'], @@ -221,7 +220,7 @@ CHARTS = { ] }, 'queries_type': { - 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'], + 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'], 'lines': [ ['Com_select', 'select', 'incremental'], ['Com_delete', 'delete', 'incremental'], @@ -232,7 +231,7 @@ CHARTS = { ] }, 'handlers': { - 'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'], + 'options': 
[None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'], 'lines': [ ['Handler_commit', 'commit', 'incremental'], ['Handler_delete', 'delete', 'incremental'], @@ -251,14 +250,14 @@ ] }, 'table_locks': { - 'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'], + 'options': [None, 'Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'], 'lines': [ ['Table_locks_immediate', 'immediate', 'incremental'], ['Table_locks_waited', 'waited', 'incremental', -1, 1] ] }, 'join_issues': { - 'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'], + 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'], 'lines': [ ['Select_full_join', 'full_join', 'incremental'], ['Select_full_range_join', 'full_range_join', 'incremental'], @@ -268,7 +267,7 @@ ] }, 'sort_issues': { - 'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'], + 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'], 'lines': [ ['Sort_merge_passes', 'merge_passes', 'incremental'], ['Sort_range', 'range', 'incremental'], @@ -276,7 +275,7 @@ ] }, 'tmp': { - 'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'], + 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'], 'lines': [ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'], ['Created_tmp_files', 'files', 'incremental'], @@ -284,14 +283,14 @@ ] }, 'connections': { - 'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'], + 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'], 'lines': [ ['Connections', 'all', 'incremental'], ['Aborted_connects', 'aborted', 'incremental'] ] }, 'connections_active': { - 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'], + 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'], 'lines': [ ['Threads_connected', 'active', 'absolute'], ['max_connections', 'limit', 'absolute'], @@ -299,21 +298,26 @@ ] }, 'binlog_cache': { - 'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'], + 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'], 'lines': [ ['Binlog_cache_disk_use', 'disk', 'incremental'], ['Binlog_cache_use', 'all', 'incremental'] ] }, 'threads': { - 'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'], + 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'], 'lines': [ ['Threads_connected', 'connected', 'absolute'], - ['Threads_created', 'created', 'incremental'], ['Threads_cached', 'cached', 'absolute', -1, 1], ['Threads_running', 'running', 'absolute'], ] }, + 'threads_creation_rate': { + 'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads', 'line'], + 'lines': [ + ['Threads_created', 'created', 'incremental'], + ] + }, 'thread_cache_misses': { 'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'], 'lines': [ @@ -321,14 +325,14 @@ ] }, 'innodb_io': { - 'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'], + 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb',
'mysql.innodb_io', 'area'], 'lines': [ ['Innodb_data_read', 'read', 'incremental', 1, 1024], ['Innodb_data_written', 'write', 'incremental', -1, 1024] ] }, 'innodb_io_ops': { - 'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'], + 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'], 'lines': [ ['Innodb_data_reads', 'reads', 'incremental'], ['Innodb_data_writes', 'writes', 'incremental', -1, 1], @@ -336,7 +340,7 @@ CHARTS = { ] }, 'innodb_io_pending_ops': { - 'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb', + 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb', 'mysql.innodb_io_pending_ops', 'line'], 'lines': [ ['Innodb_data_pending_reads', 'reads', 'absolute'], @@ -345,7 +349,7 @@ CHARTS = { ] }, 'innodb_log': { - 'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'], + 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'], 'lines': [ ['Innodb_log_waits', 'waits', 'incremental'], ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1], @@ -353,28 +357,33 @@ CHARTS = { ] }, 'innodb_os_log': { - 'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'], + 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'], + 'lines': [ + ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'], + ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1], + ] + }, + 'innodb_os_log_fsync_writes': { + 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log', 'line'], 'lines': [ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'], - ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'], - ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1], ] }, 'innodb_os_log_io': { - 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'], + 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'], 'lines': [ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024], ] }, 'innodb_cur_row_lock': { - 'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb', + 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb', 'mysql.innodb_cur_row_lock', 'area'], 'lines': [ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute'] ] }, 'innodb_rows': { - 'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'], + 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'], 'lines': [ ['Innodb_rows_inserted', 'inserted', 'incremental'], ['Innodb_rows_read', 'read', 'incremental', 1, 1], @@ -383,19 +392,25 @@ CHARTS = { ] }, 'innodb_buffer_pool_pages': { - 'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', + 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb', 'mysql.innodb_buffer_pool_pages', 'line'], 'lines': [ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'], ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1], ['Innodb_buffer_pool_pages_free', 'free', 'absolute'], - ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1], ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1], ['Innodb_buffer_pool_pages_total', 'total', 'absolute'] 
] }, + 'innodb_buffer_pool_flush_pages_requests': { + 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb', + 'mysql.innodb_buffer_pool_pages', 'line'], + 'lines': [ + ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'], + ] + }, 'innodb_buffer_pool_bytes': { - 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'], + 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'], 'lines': [ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024], ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024] @@ -411,7 +426,7 @@ CHARTS = { ] }, 'innodb_buffer_pool_reqs': { - 'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb', + 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb', 'mysql.innodb_buffer_pool_reqs', 'area'], 'lines': [ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'], @@ -419,7 +434,7 @@ CHARTS = { ] }, 'innodb_buffer_pool_ops': { - 'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb', + 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb', 'mysql.innodb_buffer_pool_ops', 'area'], 'lines': [ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'], @@ -427,7 +442,7 @@ CHARTS = { ] }, 'qcache_ops': { - 'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'], + 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'], 'lines': [ ['Qcache_hits', 'hits', 'incremental'], ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1], @@ -436,26 +451,26 @@ CHARTS = { ] }, 'qcache': { - 'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'], + 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'], 'lines': [ ['Qcache_queries_in_cache', 'queries', 'absolute'] ] }, 'qcache_freemem': { - 'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'], + 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'], 'lines': [ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024] ] }, 'qcache_memblocks': { - 'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'], + 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'], 'lines': [ ['Qcache_free_blocks', 'free', 'absolute'], ['Qcache_total_blocks', 'total', 'absolute'] ] }, 'key_blocks': { - 'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'], + 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'], 'lines': [ ['Key_blocks_unused', 'unused', 'absolute'], ['Key_blocks_used', 'used', 'absolute', -1, 1], @@ -463,14 +478,14 @@ CHARTS = { ] }, 'key_requests': { - 'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'], + 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'], 'lines': [ ['Key_read_requests', 'reads', 'incremental'], ['Key_write_requests', 'writes', 'incremental', -1, 1] ] }, 'key_disk_ops': { - 'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s', + 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s', 'myisam', 'mysql.key_disk_ops', 
'area'], 'lines': [ ['Key_reads', 'reads', 'incremental'], @@ -478,19 +493,19 @@ CHARTS = { ] }, 'files': { - 'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'], + 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'], 'lines': [ ['Open_files', 'files', 'absolute'] ] }, 'files_rate': { - 'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'], + 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'], 'lines': [ ['Opened_files', 'files', 'incremental'] ] }, 'binlog_stmt_cache': { - 'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog', + 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog', 'mysql.binlog_stmt_cache', 'line'], 'lines': [ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'], @@ -498,7 +513,7 @@ CHARTS = { ] }, 'connection_errors': { - 'options': [None, 'mysql Connection Errors', 'connections/s', 'connections', + 'options': [None, 'Connection Errors', 'connections/s', 'connections', 'mysql.connection_errors', 'line'], 'lines': [ ['Connection_errors_accept', 'accept', 'incremental'], @@ -523,35 +538,35 @@ CHARTS = { ] }, 'galera_writesets': { - 'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'], + 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'], 'lines': [ ['wsrep_received', 'rx', 'incremental'], ['wsrep_replicated', 'tx', 'incremental', -1, 1], ] }, 'galera_bytes': { - 'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'], + 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'], 'lines': [ ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024], ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024], ] }, 'galera_queue': { - 'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'], + 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'], 'lines': [ ['wsrep_local_recv_queue', 'rx', 'absolute'], ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1], ] }, 'galera_conflicts': { - 'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'], + 'options': [None, 'Replication Conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'], 'lines': [ ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'], ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1], ] }, 'galera_flow_control': { - 'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'], + 'options': [None, 'Flow Control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'], 'lines': [ ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000], ] @@ -564,7 +579,11 @@ class Service(MySQLService): MySQLService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES) + self.queries = dict( + global_status=QUERY_GLOBAL, + slave_status=QUERY_SLAVE, + variables=QUERY_VARIABLES, + ) def _get_data(self): diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf index b5956a2c6..ac9b505bc 100644 --- a/collectors/python.d.plugin/mysql/mysql.conf +++ b/collectors/python.d.plugin/mysql/mysql.conf @@ -27,11 +27,10 @@ # If unset, the default for 
python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes + # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +57,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, mysql also supports the following: diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md index 007f45c7c..7854105b7 100644 --- a/collectors/python.d.plugin/nginx/README.md +++ b/collectors/python.d.plugin/nginx/README.md @@ -37,9 +37,10 @@ priority : 90100 local: url : 'http://localhost/stub_status' - retries : 10 ``` Without configuration, module attempts to connect to `http://localhost/stub_status` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py index 09c6bbd37..84a5985e4 100644 --- a/collectors/python.d.plugin/nginx/nginx.chart.py +++ b/collectors/python.d.plugin/nginx/nginx.chart.py @@ -5,38 +5,30 @@ from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost/stub_status' -# }} - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['connections', 'requests', 'connection_status', 'connect_rate'] +ORDER = [ + 'connections', + 'requests', + 'connection_status', + 'connect_rate', +] CHARTS = { 'connections': { - 'options': [None, 'nginx Active Connections', 'connections', 'active connections', + 'options': [None, 'Active Connections', 'connections', 'active connections', 'nginx.connections', 'line'], 'lines': [ ['active'] ] }, 'requests': { - 'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'], + 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'], 'lines': [ ['requests', None, 'incremental'] ] }, 'connection_status': { - 'options': [None, 'nginx Active Connections by Status', 'connections', 'status', + 'options': [None, 'Active Connections by Status', 'connections', 'status', 'nginx.connection_status', 'line'], 'lines': [ ['reading'], @@ -45,7 +37,7 @@ CHARTS = { ] }, 'connect_rate': { - 'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate', + 'options': [None, 'Connections Rate', 'connections/s', 
'connections rate', 'nginx.connect_rate', 'line'], 'lines': [ ['accepts', 'accepted', 'incremental'], @@ -58,9 +50,9 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://localhost/stub_status') self.order = ORDER self.definitions = CHARTS + self.url = self.configuration.get('url', 'http://localhost/stub_status') def _get_data(self): """ diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf index 71c521066..4001b4bbe 100644 --- a/collectors/python.d.plugin/nginx/nginx.conf +++ b/collectors/python.d.plugin/nginx/nginx.conf @@ -39,11 +39,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -70,7 +68,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, this plugin also supports the following: diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md index 43ec867a3..c20ce30a0 100644 --- a/collectors/python.d.plugin/nginx_plus/README.md +++ b/collectors/python.d.plugin/nginx_plus/README.md @@ -123,3 +123,5 @@ local: Without configuration, the module fails to start.
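The nginx hunks above define the charts but leave the `_get_data()` body untouched, so as a rough sketch, here is the shape of the stub_status text that feeds those dimensions and one way to parse it (the format follows nginx's stub_status documentation; the parsing code is illustrative, not the module's):

```python
SAMPLE = '''Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106
'''


def parse_stub_status(raw):
    lines = raw.splitlines()
    accepts, handled, requests = (int(v) for v in lines[2].split())
    reading, writing, waiting = (int(v) for v in lines[3].split()[1::2])
    return {
        'active': int(lines[0].split(':')[1]),   # -> 'connections' chart
        'accepts': accepts, 'handled': handled,  # -> 'connect_rate' chart
        'requests': requests,                    # -> 'requests' chart
        'reading': reading, 'writing': writing, 'waiting': waiting,
    }


print(parse_stub_status(SAMPLE))
```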
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py index 1392f5a56..3082fdbe7 100644 --- a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py +++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py @@ -16,12 +16,7 @@ except ImportError: from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 60 -# charts order (can be overridden if you want less charts, or different order) ORDER = [ 'requests_total', 'requests_current', @@ -76,7 +71,7 @@ CHARTS = { ] }, 'ssl_memory_usage': { - 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'], + 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'], 'lines': [ ['ssl_memory_usage', 'usage', 'absolute', 1, 100] ] @@ -95,7 +90,7 @@ def cache_charts(cache): charts = OrderedDict() charts['{0}_traffic'.format(cache.name)] = { - 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'], + 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'], 'lines': [ ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024], ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024], @@ -103,7 +98,7 @@ def cache_charts(cache): ] } charts['{0}_memory_usage'.format(cache.name)] = { - 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'], + 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'], 'lines': [ ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100], ] @@ -200,7 +195,8 @@ def web_upstream_charts(wu): 'lines': dimensions('active') } charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = { - 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'], + 'options': [None, 'Peers Connections Usage', 'percentage', family, + 'nginx_plus.web_upstream_connections_usage', 'line'], 'lines': dimensions('connections_usage', d=100) } # Traffic @@ -223,7 +219,7 @@ def web_upstream_charts(wu): # Response Time for peer in wu: charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = { - 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family, + 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family, 'nginx_plus.web_upstream_peer_timings', 'line'], 'lines': [ ['_'.join([wu.name, peer.server, 'header_time']), 'header'], @@ -232,7 +228,7 @@ def web_upstream_charts(wu): } # Memory Usage charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = { - 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'], + 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'], 'lines': [ ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100] ] diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf index 7b5c8f43f..201eb0eb7 100644 --- 
a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf +++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, nginx_plus also supports the following: diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md index 02c302f41..b118657d2 100644 --- a/collectors/python.d.plugin/nsd/README.md +++ b/collectors/python.d.plugin/nsd/README.md @@ -52,3 +52,5 @@ It produces: Configuration is not needed. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py index d713f46bd..77b0d7bbf 100644 --- a/collectors/python.d.plugin/nsd/nsd.chart.py +++ b/collectors/python.d.plugin/nsd/nsd.chart.py @@ -7,13 +7,20 @@ import re from bases.FrameworkServices.ExecutableService import ExecutableService -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 5 + update_every = 30 -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode'] +NSD_CONTROL_COMMAND = 'nsd-control stats_noreset' +REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)') + +ORDER = [ + 'queries', + 'zones', + 'protocol', + 'type', + 'transfer', + 'rcode', +] CHARTS = { 'queries': { @@ -79,22 +86,21 @@ CHARTS = { class Service(ExecutableService): def __init__(self, configuration=None, name=None): - ExecutableService.__init__( - self, configuration=configuration, name=name) - self.command = 'nsd-control stats_noreset' + ExecutableService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)') + self.command = NSD_CONTROL_COMMAND def _get_data(self): lines = self._get_raw_data() if not lines: return None - r = self.regex - stats = dict((k.replace('.', '_'), int(v)) - for k, v in r.findall(''.join(lines))) + stats = dict( + (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines)) + ) stats.setdefault('num_opcode_NOTIFY', 0) stats.setdefault('num_type_TYPE252', 0) stats.setdefault('num_type_TYPE255', 0) + return stats diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf index 078e97216..77a8a3177 
100644 --- a/collectors/python.d.plugin/nsd/nsd.conf +++ b/collectors/python.d.plugin/nsd/nsd.conf @@ -28,11 +28,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -59,7 +57,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, nsd also supports the following: diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md index b0fa17fde..d33fd877a 100644 --- a/collectors/python.d.plugin/ntpd/README.md +++ b/collectors/python.d.plugin/ntpd/README.md @@ -69,3 +69,5 @@ otherhost: If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the systemvars. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py index 79d557c80..5a5477e63 100644 --- a/collectors/python.d.plugin/ntpd/ntpd.chart.py +++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py @@ -9,10 +9,6 @@ import re from bases.FrameworkServices.SocketService import SocketService -# default module values -update_every = 1 -priority = 60000 -retries = 60 # NTP Control Message Protocol constants MODE = 6 @@ -54,13 +50,15 @@ ORDER = [ CHARTS = { 'sys_offset': { - 'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'], + 'options': [None, 'Combined offset of server relative to this host', 'milliseconds', + 'system', 'ntpd.sys_offset', 'area'], 'lines': [ ['offset', 'offset', 'absolute', 1, PRECISION] ] }, 'sys_jitter': { - 'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'], + 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds', + 'system', 'ntpd.sys_jitter', 'line'], 'lines': [ ['sys_jitter', 'system', 'absolute', 1, PRECISION], ['clk_jitter', 'clock', 'absolute', 1, PRECISION] @@ -79,14 +77,14 @@ CHARTS = { ] }, 'sys_rootdelay': { - 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system', + 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system', 'ntpd.sys_rootdelay', 'area'], 'lines': [ ['rootdelay', 'delay', 'absolute', 1, PRECISION] ] }, 
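[Editor's note] The unit renames in these hunks ('ms' → 'milliseconds', 'KB' → 'KiB') all land in the same positional slot of a chart definition. For reference, this is the layout the python.d modules use throughout; the example values mirror the `sys_offset` chart above, where ntpd stores scaled integers and the divisor restores milliseconds.

```python
PRECISION = 1000  # scale factor used by the ntpd module above

CHART = {
    'options': [
        None,               # name override (None keeps the chart id)
        'Combined offset of server relative to this host',  # title
        'milliseconds',     # units label shown on the dashboard
        'system',           # family: the dashboard submenu
        'ntpd.sys_offset',  # context, used to attach alarms
        'area',             # chart type
    ],
    'lines': [
        # [dimension id, display name, algorithm, multiplier, divisor]
        ['offset', 'offset', 'absolute', 1, PRECISION],
    ],
}
```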
'sys_rootdisp': { - 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system', + 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system', 'ntpd.sys_rootdisp', 'area'], 'lines': [ ['rootdisp', 'dispersion', 'absolute', 1, PRECISION] @@ -115,27 +113,27 @@ CHARTS = { PEER_CHARTS = { 'peer_offset': { - 'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'], + 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'], 'lines': [] }, 'peer_delay': { - 'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'], + 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'], 'lines': [] }, 'peer_dispersion': { - 'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'], + 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'], 'lines': [] }, 'peer_jitter': { - 'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'], + 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'], 'lines': [] }, 'peer_xleave': { - 'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'], + 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'], 'lines': [] }, 'peer_rootdelay': { - 'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers', + 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers', 'ntpd.peer_rootdelay', 'line'], 'lines': [] }, @@ -235,7 +233,6 @@ class Service(SocketService): SocketService.__init__(self, configuration=configuration, name=name) self.order = list(ORDER) self.definitions = dict(CHARTS) - self.port = 'ntp' self.dgram_socket = True self.system = System() @@ -244,7 +241,6 @@ class Service(SocketService): self.retries = 0 self.show_peers = self.configuration.get('show_peers', False) self.peer_rescan = self.configuration.get('peer_rescan', 60) - if self.show_peers: self.definitions.update(PEER_CHARTS) diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf index 7adc4074b..80bd468d1 100644 --- a/collectors/python.d.plugin/ntpd/ntpd.conf +++ b/collectors/python.d.plugin/ntpd/ntpd.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. 
+# penalty: yes # ---------------------------------------------------------------------- # JOBS (data collection sources) @@ -52,7 +50,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # # Additionally to the above, ntp also supports the following: # diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md index 06acfc297..48b611951 100644 --- a/collectors/python.d.plugin/nvidia_smi/README.md +++ b/collectors/python.d.plugin/nvidia_smi/README.md @@ -36,4 +36,5 @@ Sample: ```yaml poll_seconds: 1 -``` \ No newline at end of file +``` +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py index c3fff6219..7cb816c0d 100644 --- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py +++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py @@ -15,6 +15,8 @@ disabled_by_default = True NVIDIA_SMI = 'nvidia-smi' +BAD_VALUE = 'N/A' + EMPTY_ROW = '' EMPTY_ROW_LIMIT = 500 POLLER_BREAK_ROW = '' @@ -47,39 +49,39 @@ def gpu_charts(gpu): charts = { PCI_BANDWIDTH: { - 'options': [None, 'PCI Express Bandwidth Utilization', 'KB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'], + 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'], 'lines': [ ['rx_util', 'rx', 'absolute', 1, 1], ['tx_util', 'tx', 'absolute', 1, -1], ] }, FAN_SPEED: { - 'options': [None, 'Fan Speed', '%', fam, 'nvidia_smi.fan_speed', 'line'], + 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'], 'lines': [ ['fan_speed', 'speed'], ] }, GPU_UTIL: { - 'options': [None, 'GPU Utilization', '%', fam, 'nvidia_smi.gpu_utilization', 'line'], + 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'], 'lines': [ ['gpu_util', 'utilization'], ] }, MEM_UTIL: { - 'options': [None, 'Memory Bandwidth Utilization', '%', fam, 'nvidia_smi.mem_utilization', 'line'], + 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'], 'lines': [ ['memory_util', 'utilization'], ] }, ENCODER_UTIL: { - 'options': [None, 'Encoder/Decoder Utilization', '%', fam, 'nvidia_smi.encoder_utilization', 'line'], + 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'], 'lines': [ ['encoder_util', 'encoder'], ['decoder_util', 'decoder'], ] }, MEM_ALLOCATED: { - 'options': [None, 'Memory Allocated', 'MB', fam, 'nvidia_smi.memory_allocated', 'line'], + 'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'], 'lines': [ ['fb_memory_usage', 'used'], ] @@ -206,6 +208,15 @@ def handle_attr_error(method): return on_call +def handle_value_error(method): + def on_call(*args, **kwargs): + try: + return method(*args, **kwargs) + except ValueError: + return None + return on_call + + class GPU: def __init__(self, num, root): self.num = num @@ -272,6 +283,7 @@ class GPU: def mem_clock(self): return 
self.root.find('clocks').find('mem_clock').text.split()[0] + @handle_value_error @handle_attr_error def power_draw(self): return float(self.root.find('power_readings').find('power_draw').text.split()[0]) * 100 @@ -294,7 +306,9 @@ class GPU: 'power_draw': self.power_draw(), } - return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None) + return dict( + ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE + ) class Service(SimpleService): @@ -302,7 +316,6 @@ class Service(SimpleService): super(Service, self).__init__(configuration=configuration, name=name) self.order = list() self.definitions = dict() - poll = int(configuration.get('poll_seconds', 1)) self.poller = NvidiaSMIPoller(poll) diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf index e1bcf3faf..53e544a5d 100644 --- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf +++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, example also supports the following: diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md index 938535bca..629cc1539 100644 --- a/collectors/python.d.plugin/openldap/README.md +++ b/collectors/python.d.plugin/openldap/README.md @@ -55,3 +55,5 @@ openldap: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py index 6342d3863..768ed01e8 100644 --- a/collectors/python.d.plugin/openldap/openldap.chart.py +++ b/collectors/python.d.plugin/openldap/openldap.chart.py @@ -11,8 +11,6 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -priority = 60000 DEFAULT_SERVER = 'localhost' DEFAULT_PORT = '389' @@ -36,7 +34,7 @@ CHARTS = { ] }, 'bytes_sent': { - 'options': [None, 'Traffic', 'KB/s', 'ldap', 'openldap.traffic_stats', 'line'], + 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'], 'lines': [ ['bytes_sent', 'sent', 'incremental', 1, 1024] ] @@ -136,13 +134,11 @@ class Service(SimpleService): 
SimpleService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - self.server = configuration.get('server', DEFAULT_SERVER) self.port = configuration.get('port', DEFAULT_PORT) self.username = configuration.get('username') self.password = configuration.get('password') self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT) - self.alive = False self.conn = None diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf index 662cc58c4..6182b3ee2 100644 --- a/collectors/python.d.plugin/openldap/openldap.conf +++ b/collectors/python.d.plugin/openldap/openldap.conf @@ -28,11 +28,9 @@ update_every: 10 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -59,7 +57,7 @@ update_every: 10 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # ---------------------------------------------------------------------- diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md index be1ea279e..bcd1f00e3 100644 --- a/collectors/python.d.plugin/ovpn_status_log/README.md +++ b/collectors/python.d.plugin/ovpn_status_log/README.md @@ -30,3 +30,5 @@ default ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py index 64d7062d9..dc7a6002e 100644 --- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py +++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py @@ -3,15 +3,18 @@ # Author: l2isbad # SPDX-License-Identifier: GPL-3.0-or-later -from re import compile as r_compile +import re from bases.FrameworkServices.SimpleService import SimpleService -priority = 60000 -retries = 60 + update_every = 10 -ORDER = ['users', 'traffic'] +ORDER = [ + 'users', + 'traffic', +] + CHARTS = { 'users': { 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'], @@ -20,15 +23,20 @@ CHARTS = { ] }, 'traffic': { - 'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'], + 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'], 'lines': [ - ['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10] + ['bytes_in', 'in', 'incremental', 1, 1 << 10], + 
['bytes_out', 'out', 'incremental', -1, 1 << 10] ] } } -TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P\d+) (?P\d+)') -STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P(?:read|write)) bytes,(?P\d+)') +TLS_REGEX = re.compile( + r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P\d+) (?P\d+)' +) +STATIC_KEY_REGEX = re.compile( + r'TCP/[A-Z]+ (?P(?:read|write)) bytes,(?P\d+)' +) class Service(SimpleService): diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf index 6fb35a530..1d71f6b8e 100644 --- a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf +++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, openvpn status log also supports the following: diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md index 66930463f..d3aa85a7c 100644 --- a/collectors/python.d.plugin/phpfpm/README.md +++ b/collectors/python.d.plugin/phpfpm/README.md @@ -32,9 +32,10 @@ priority : 90100 local: url : 'http://localhost/status' - retries : 10 ``` Without configuration, module attempts to connect to `http://localhost/status` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py index a3f0963fc..70091e233 100644 --- a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py +++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py @@ -9,20 +9,8 @@ import re from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# default job configuration (overridden by python.d.plugin) -# config = {'local': { -# 'update_every': update_every, -# 'retries': retries, -# 'priority': priority, -# 'url': 'http://localhost/status?full&json' -# }} - -# charts order (can be overridden if you want less charts, or different order) +REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)') POOL_INFO = [ ('active processes', 'active'), @@ -50,7 +38,14 @@ CALC = [ ('avg', average) ] -ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 
'request_mem'] +ORDER = [ + 'connections', + 'requests', + 'performance', + 'request_duration', + 'request_cpu', + 'request_mem', +] CHARTS = { 'connections': { @@ -85,7 +80,7 @@ CHARTS = { ] }, 'request_cpu': { - 'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'], + 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'], 'lines': [ ['minReqCpu', 'min'], ['maxReqCpu', 'max'], @@ -93,7 +88,7 @@ CHARTS = { ] }, 'request_mem': { - 'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'], + 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'], 'lines': [ ['minReqMem', 'min', 'absolute', 1, 1024], ['maxReqMem', 'max', 'absolute', 1, 1024], @@ -106,14 +101,14 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://localhost/status?full&json') self.order = ORDER self.definitions = CHARTS - self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)') + self.url = self.configuration.get('url', 'http://localhost/status?full&json') self.json = '&json' in self.url or '?json' in self.url self.json_full = self.url.endswith(('?full&json', '?json&full')) - self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC - for metric, p_name in PER_PROCESS_INFO]) + self.if_all_processes_running = dict( + [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO] + ) def _get_data(self): """ @@ -124,7 +119,7 @@ class Service(UrlService): if not raw: return None - raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw) + raw_json = parse_raw_data_(is_json=self.json, raw_data=raw) # Per Pool info: active connections, requests and performance charts to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO) @@ -160,7 +155,7 @@ def fetch_data_(raw_data, metrics_list, pid=''): return result -def parse_raw_data_(is_json, regex, raw_data): +def parse_raw_data_(is_json, raw_data): """ :param is_json: bool :param regex: compiled regular expr @@ -174,4 +169,4 @@ def parse_raw_data_(is_json, regex, raw_data): return dict() else: raw_data = ' '.join(raw_data.split()) - return dict(regex.findall(raw_data)) + return dict(REGEX.findall(raw_data)) diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf index 571eb9156..d31853903 100644 --- a/collectors/python.d.plugin/phpfpm/phpfpm.conf +++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
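[Editor's note] The phpfpm refactor above hoists the compiled pattern to a module-level `REGEX` constant that `parse_raw_data_` now uses directly instead of a passed-in argument. A standalone sketch of what that parsing step does with a text-format status page; the sample payload is assumed.

```python
import re

REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')

# assumed excerpt of a text-format PHP-FPM status page
raw_data = """accepted conn:    3
active processes: 1
idle processes:   2
total processes:  3"""

# same normalization parse_raw_data_ applies: collapse runs of whitespace
flat = ' '.join(raw_data.split())
print(dict(REGEX.findall(flat)))
# -> {'accepted conn': '3', 'active processes': '1',
#     'idle processes': '2', 'total processes': '3'}
```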
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, PHP-FPM also supports the following: diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md index f1338d576..8f289c8de 100644 --- a/collectors/python.d.plugin/portcheck/README.md +++ b/collectors/python.d.plugin/portcheck/README.md @@ -33,3 +33,5 @@ server: * Currently, the accuracy of the latency is low and should be used as reference only. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py index e86f82544..8479e38e4 100644 --- a/collectors/python.d.plugin/portcheck/portcheck.chart.py +++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py @@ -12,9 +12,6 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -priority = 60000 -retries = 60 PORT_LATENCY = 'connect' @@ -26,7 +23,7 @@ ORDER = ['latency', 'status'] CHARTS = { 'latency': { - 'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'], + 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'], 'lines': [ [PORT_LATENCY, 'connect', 'absolute', 100, 1000] ] @@ -121,7 +118,7 @@ class Service(SimpleService): :return: dict """ - af, _, proto, _, sa = socket_config + _, _, _, _, sa = socket_config port = str(sa[1]) try: self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port)) diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf index b3dd8bd3f..df67824bd 100644 --- a/collectors/python.d.plugin/portcheck/portcheck.conf +++ b/collectors/python.d.plugin/portcheck/portcheck.conf @@ -27,6 +27,10 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes + # chart_cleanup sets the default chart cleanup interval in iterations. # A chart is marked as obsolete if it has not been updated # 'chart_cleanup' iterations in a row. @@ -60,7 +64,7 @@ chart_cleanup: 0 # # JOBs sharing a name are mutually exclusive # update_every: 1 # [optional] the JOB's data collection frequency # priority: 60000 # [optional] the JOB's order on the dashboard -# retries: 60 # [optional] the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # timeout: 1 # [optional] the socket timeout when connecting # host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name. # port: 22 # [required] the port number to check. Specify an integer, not service name. 
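[Editor's note] The portcheck change above unpacks only the sockaddr from each `getaddrinfo()` tuple, since the family and protocol fields were unused in that method. A sketch of the measurement that feeds the `latency` chart; the scaling is chosen to match the `['connect', 'absolute', 100, 1000]` line definition, and host/port are placeholders.

```python
import socket
import time

HOST, PORT, TIMEOUT = 'example.com', 22, 1  # assumed job parameters

for socket_config in socket.getaddrinfo(HOST, PORT, 0, socket.SOCK_STREAM):
    _, _, _, _, sa = socket_config  # only the sockaddr is needed here
    sock = socket.socket(socket_config[0], socket_config[1])
    sock.settimeout(TIMEOUT)
    started = time.time()
    try:
        sock.connect(sa)
        # store seconds * 10000 so that multiplier 100 / divisor 1000
        # renders milliseconds with one decimal place on the chart
        print({'connect': round((time.time() - started) * 10000)})
    except socket.error:
        print({'connect': None})  # a failed probe is charted via status dims
    finally:
        sock.close()
```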
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md index 77c95ff44..e2147ac91 100644 --- a/collectors/python.d.plugin/postfix/README.md +++ b/collectors/python.d.plugin/postfix/README.md @@ -13,3 +13,5 @@ It produces only two charts: Configuration is not needed. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py index bdbd0feea..b650514ee 100644 --- a/collectors/python.d.plugin/postfix/postfix.chart.py +++ b/collectors/python.d.plugin/postfix/postfix.chart.py @@ -5,13 +5,12 @@ from bases.FrameworkServices.ExecutableService import ExecutableService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 +POSTQUEUE_COMMAND = 'postqueue -p' -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['qemails', 'qsize'] +ORDER = [ + 'qemails', + 'qsize', +] CHARTS = { 'qemails': { @@ -21,7 +20,7 @@ CHARTS = { ] }, 'qsize': { - 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'], + 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'], 'lines': [ ['size', None, 'absolute'] ] @@ -32,9 +31,9 @@ CHARTS = { class Service(ExecutableService): def __init__(self, configuration=None, name=None): ExecutableService.__init__(self, configuration=configuration, name=name) - self.command = 'postqueue -p' self.order = ORDER self.definitions = CHARTS + self.command = POSTQUEUE_COMMAND def _get_data(self): """ diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf index e0d5a5f83..a4d2472ee 100644 --- a/collectors/python.d.plugin/postfix/postfix.conf +++ b/collectors/python.d.plugin/postfix/postfix.conf @@ -28,11 +28,9 @@ update_every: 10 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
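[Editor's note] postfix now keeps its command in a `POSTQUEUE_COMMAND` constant and assigns it after the chart metadata, matching the ordering convention used in the other modules of this commit. Its two charts come from the summary line `postqueue -p` prints; a standalone sketch of that parse, where the sample output format (`-- 3 Kbytes in 5 Requests.`) is an assumption based on stock Postfix.

```python
import subprocess

POSTQUEUE_COMMAND = 'postqueue -p'

output = subprocess.check_output(POSTQUEUE_COMMAND.split()).decode()
last_line = output.strip().splitlines()[-1].split()

if last_line[0] == '--':
    # "-- <size> Kbytes in <count> Requests."
    data = {'qsize': int(last_line[1]), 'qemails': int(last_line[4])}
else:
    # e.g. "Mail queue is empty"
    data = {'qsize': 0, 'qemails': 0}
print(data)
```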
@@ -59,7 +57,7 @@ update_every: 10 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, postfix also supports the following: diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md index e7b108d36..9939a0c48 100644 --- a/collectors/python.d.plugin/postgres/README.md +++ b/collectors/python.d.plugin/postgres/README.md @@ -66,3 +66,5 @@ tcp: When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py index 7f43877c3..e988eec36 100644 --- a/collectors/python.d.plugin/postgres/postgres.chart.py +++ b/collectors/python.d.plugin/postgres/postgres.chart.py @@ -16,13 +16,34 @@ except ImportError: from bases.FrameworkServices.SimpleService import SimpleService -# default module values -update_every = 1 -priority = 60000 -retries = 60 + +DEFAULT_PORT = 5432 +DEFAULT_USER = 'postgres' +DEFAULT_CONNECT_TIMEOUT = 2 # seconds +DEFAULT_STATEMENT_TIMEOUT = 5000 # ms + + +WAL = 'WAL' +ARCHIVE = 'ARCHIVE' +BACKENDS = 'BACKENDS' +TABLE_STATS = 'TABLE_STATS' +INDEX_STATS = 'INDEX_STATS' +DATABASE = 'DATABASE' +BGWRITER = 'BGWRITER' +LOCKS = 'LOCKS' +DATABASES = 'DATABASES' +STANDBY = 'STANDBY' +REPLICATION_SLOT = 'REPLICATION_SLOT' +STANDBY_DELTA = 'STANDBY_DELTA' +REPSLOT_FILES = 'REPSLOT_FILES' +IF_SUPERUSER = 'IF_SUPERUSER' +SERVER_VERSION = 'SERVER_VERSION' +AUTOVACUUM = 'AUTOVACUUM' +DIFF_LSN = 'DIFF_LSN' +WAL_WRITES = 'WAL_WRITES' METRICS = { - 'DATABASE': [ + DATABASE: [ 'connections', 'xact_commit', 'xact_rollback', @@ -38,32 +59,32 @@ METRICS = { 'temp_bytes', 'size' ], - 'BACKENDS': [ + BACKENDS: [ 'backends_active', 'backends_idle' ], - 'INDEX_STATS': [ + INDEX_STATS: [ 'index_count', 'index_size' ], - 'TABLE_STATS': [ + TABLE_STATS: [ 'table_size', 'table_count' ], - 'WAL': [ + WAL: [ 'written_wal', 'recycled_wal', 'total_wal' ], - 'WAL_WRITES': [ + WAL_WRITES: [ 'wal_writes' ], - 'ARCHIVE': [ + ARCHIVE: [ 'ready_count', 'done_count', 'file_count' ], - 'BGWRITER': [ + BGWRITER: [ 'checkpoint_scheduled', 'checkpoint_requested', 'buffers_checkpoint', @@ -73,7 +94,7 @@ METRICS = { 'buffers_alloc', 'buffers_backend_fsync' ], - 'LOCKS': [ + LOCKS: [ 'ExclusiveLock', 'RowShareLock', 'SIReadLock', @@ -84,27 +105,61 @@ METRICS = { 'ShareLock', 'RowExclusiveLock' ], - 'AUTOVACUUM': [ + AUTOVACUUM: [ 'analyze', 'vacuum_analyze', 'vacuum', 'vacuum_freeze', 'brin_summarize' ], - 'STANDBY_DELTA': [ + STANDBY_DELTA: [ 'sent_delta', 'write_delta', 'flush_delta', 'replay_delta' ], - 'REPSLOT_FILES': [ + REPSLOT_FILES: [ 'replslot_wal_keep', 'replslot_files' ] } -QUERIES = { - 'WAL': """ +NO_VERSION = 0 +DEFAULT = 'DEFAULT' +V96 = 'V96' +V10 = 'V10' +V11 = 'V11' + + +QUERY_WAL = { + DEFAULT: """ +SELECT + count(*) as total_wal, + count(*) FILTER (WHERE type = 'recycled') AS recycled_wal, + count(*) FILTER (WHERE type 
= 'written') AS written_wal +FROM + (SELECT + wal.name, + pg_walfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_wal_lsn() + END ), + CASE + WHEN wal.name > pg_walfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_wal_lsn() + END ) THEN 'recycled' + ELSE 'written' + END AS type + FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name) + WHERE name ~ '^[0-9A-F]{24}$' + ORDER BY + (pg_stat_file('pg_wal/'||name)).modification, + wal.name DESC) sub; +""", + V96: """ SELECT count(*) as total_wal, count(*) FILTER (WHERE type = 'recycled') AS recycled_wal, @@ -112,34 +167,49 @@ SELECT FROM (SELECT wal.name, - pg_{0}file_name( + pg_xlogfile_name( CASE pg_is_in_recovery() WHEN true THEN NULL - ELSE pg_current_{0}_{1}() + ELSE pg_current_xlog_location() END ), CASE - WHEN wal.name > pg_{0}file_name( + WHEN wal.name > pg_xlogfile_name( CASE pg_is_in_recovery() WHEN true THEN NULL - ELSE pg_current_{0}_{1}() + ELSE pg_current_xlog_location() END ) THEN 'recycled' ELSE 'written' END AS type - FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name) - WHERE name ~ '^[0-9A-F]{{24}}$' + FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name) + WHERE name ~ '^[0-9A-F]{24}$' ORDER BY - (pg_stat_file('pg_{0}/'||name)).modification, + (pg_stat_file('pg_xlog/'||name)).modification, wal.name DESC) sub; """, - 'ARCHIVE': """ +} + +QUERY_ARCHIVE = { + DEFAULT: """ +SELECT + CAST(COUNT(*) AS INT) AS file_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count +FROM + pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file); +""", + V96: """ SELECT CAST(COUNT(*) AS INT) AS file_count, CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count, CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count FROM - pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file); + pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file); + """, - 'BACKENDS': """ +} + +QUERY_BACKEND = { + DEFAULT: """ SELECT count(*) - (SELECT count(*) FROM pg_stat_activity @@ -151,21 +221,30 @@ SELECT AS backends_idle FROM pg_stat_activity; """, - 'TABLE_STATS': """ +} + +QUERY_TABLE_STATS = { + DEFAULT: """ SELECT ((sum(relpages) * 8) * 1024) AS table_size, count(1) AS table_count FROM pg_class WHERE relkind IN ('r', 't'); """, - 'INDEX_STATS': """ +} + +QUERY_INDEX_STATS = { + DEFAULT: """ SELECT ((sum(relpages) * 8) * 1024) AS index_size, count(1) AS index_count FROM pg_class WHERE relkind = 'i'; """, - 'DATABASE': """ +} + +QUERY_DATABASE = { + DEFAULT: """ SELECT datname AS database_name, numbackends AS connections, @@ -185,7 +264,10 @@ SELECT FROM pg_stat_database WHERE datname IN %(databases)s ; """, - 'BGWRITER': """ +} + +QUERY_BGWRITER = { + DEFAULT: """ SELECT checkpoints_timed AS checkpoint_scheduled, checkpoints_req AS checkpoint_requested, @@ -197,7 +279,10 @@ SELECT buffers_backend_fsync FROM pg_stat_bgwriter; """, - 'LOCKS': """ +} + +QUERY_LOCKS = { + DEFAULT: """ SELECT pg_database.datname as database_name, mode, @@ -208,7 +293,10 @@ INNER JOIN pg_database GROUP BY datname, mode ORDER BY datname, mode; """, - 'FIND_DATABASES': """ +} + +QUERY_DATABASES = { + DEFAULT: """ SELECT datname FROM pg_stat_database @@ -217,48 +305,129 @@ WHERE (SELECT current_user), datname, 'connect') AND NOT datname ~* '^template\d '; """, - 'FIND_STANDBY': """ +} + 
+QUERY_STANDBY = { + DEFAULT: """ SELECT application_name FROM pg_stat_replication WHERE application_name IS NOT NULL GROUP BY application_name; """, - 'FIND_REPLICATION_SLOT': """ +} + +QUERY_REPLICATION_SLOT = { + DEFAULT: """ SELECT slot_name FROM pg_replication_slots; +""" +} + +QUERY_STANDBY_DELTA = { + DEFAULT: """ +SELECT + application_name, + pg_wal_lsn_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() + END, + sent_lsn) AS sent_delta, + pg_wal_lsn_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() + END, + write_lsn) AS write_delta, + pg_wal_lsn_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() + END, + flush_lsn) AS flush_delta, + pg_wal_lsn_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() + END, + replay_lsn) AS replay_delta +FROM pg_stat_replication +WHERE application_name IS NOT NULL; """, - 'STANDBY_DELTA': """ + V96: """ SELECT application_name, - pg_{0}_{1}_diff( + pg_xlog_location_diff( CASE pg_is_in_recovery() - WHEN true THEN pg_last_{0}_receive_{1}() - ELSE pg_current_{0}_{1}() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() END, - sent_{1}) AS sent_delta, - pg_{0}_{1}_diff( + sent_location) AS sent_delta, + pg_xlog_location_diff( CASE pg_is_in_recovery() - WHEN true THEN pg_last_{0}_receive_{1}() - ELSE pg_current_{0}_{1}() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() END, - write_{1}) AS write_delta, - pg_{0}_{1}_diff( + write_location) AS write_delta, + pg_xlog_location_diff( CASE pg_is_in_recovery() - WHEN true THEN pg_last_{0}_receive_{1}() - ELSE pg_current_{0}_{1}() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() END, - flush_{1}) AS flush_delta, - pg_{0}_{1}_diff( + flush_location) AS flush_delta, + pg_xlog_location_diff( CASE pg_is_in_recovery() - WHEN true THEN pg_last_{0}_receive_{1}() - ELSE pg_current_{0}_{1}() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() END, - replay_{1}) AS replay_delta + replay_location) AS replay_delta FROM pg_stat_replication WHERE application_name IS NOT NULL; """, - 'REPSLOT_FILES': """ +} + +QUERY_REPSLOT_FILES = { + DEFAULT: """ +WITH wal_size AS ( + SELECT + setting::int AS val + FROM pg_settings + WHERE name = 'wal_segment_size' + ) +SELECT + slot_name, + slot_type, + replslot_wal_keep, + count(slot_file) AS replslot_files +FROM + (SELECT + slot.slot_name, + CASE + WHEN slot_file <> 'state' THEN 1 + END AS slot_file , + slot_type, + COALESCE ( + floor( + (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn) + - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val) + ),0) AS replslot_wal_keep + FROM pg_replication_slots slot + LEFT JOIN ( + SELECT + slot2.slot_name, + pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file + FROM pg_replication_slots slot2 + ) files (slot_name, slot_file) + ON slot.slot_name = files.slot_name + CROSS JOIN wal_size s + ) AS d +GROUP BY + slot_name, + slot_type, + replslot_wal_keep; +""", + V10: """ WITH wal_size AS ( SELECT current_setting('wal_block_size')::INT * setting::INT AS val @@ -297,13 +466,22 @@ GROUP BY slot_type, replslot_wal_keep; """, - 'IF_SUPERUSER': """ +} + +QUERY_SUPERUSER = { + DEFAULT: """ SELECT current_setting('is_superuser') = 'on' AS is_superuser; """, - 'DETECT_SERVER_VERSION': """ +} + 
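[Editor's note] Each query now lives in a small dict keyed by a version tag (DEFAULT, V96, V10), and the `query_factory` helper added further down selects a variant by comparing against `server_version_num` thresholds. A condensed usage sketch, reusing the `QUERY_WAL`, `V96`, and `DEFAULT` names defined above; the connection parameters are assumed.

```python
import psycopg2

conn = psycopg2.connect(host='localhost', user='postgres', dbname='postgres')
conn.set_session(readonly=True, autocommit=True)
cursor = conn.cursor()

cursor.execute('SHOW server_version_num;')
server_version = int(cursor.fetchone()[0])  # e.g. 90605 for 9.6.5

# pre-10 servers name everything "xlog" rather than "wal", so versions
# below 100000 get the V96 variant of the query
query = QUERY_WAL[V96] if server_version < 100000 else QUERY_WAL[DEFAULT]
cursor.execute(query)
print(cursor.fetchone())
```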
+QUERY_SHOW_VERSION = { + DEFAULT: """ SHOW server_version_num; """, - 'AUTOVACUUM': """ +} + +QUERY_AUTOVACUUM = { + DEFAULT: """ SELECT count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze, count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze, @@ -315,23 +493,78 @@ SELECT FROM pg_stat_activity WHERE query NOT LIKE '%%pg_stat_activity%%'; """, - 'DIFF_LSN': """ +} + +QUERY_DIFF_LSN = { + DEFAULT: """ SELECT - pg_{0}_{1}_diff( + pg_wal_lsn_diff( CASE pg_is_in_recovery() - WHEN true THEN pg_last_{0}_receive_{1}() - ELSE pg_current_{0}_{1}() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() END, '0/0') as wal_writes ; -""" +""", + V96: """ +SELECT + pg_xlog_location_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() + END, + '0/0') as wal_writes ; +""", } -QUERY_STATS = { - QUERIES['DATABASE']: METRICS['DATABASE'], - QUERIES['BACKENDS']: METRICS['BACKENDS'], - QUERIES['LOCKS']: METRICS['LOCKS'] -} +def query_factory(name, version=NO_VERSION): + if name == BACKENDS: + return QUERY_BACKEND[DEFAULT] + elif name == TABLE_STATS: + return QUERY_TABLE_STATS[DEFAULT] + elif name == INDEX_STATS: + return QUERY_INDEX_STATS[DEFAULT] + elif name == DATABASE: + return QUERY_DATABASE[DEFAULT] + elif name == BGWRITER: + return QUERY_BGWRITER[DEFAULT] + elif name == LOCKS: + return QUERY_LOCKS[DEFAULT] + elif name == DATABASES: + return QUERY_DATABASES[DEFAULT] + elif name == STANDBY: + return QUERY_STANDBY[DEFAULT] + elif name == REPLICATION_SLOT: + return QUERY_REPLICATION_SLOT[DEFAULT] + elif name == IF_SUPERUSER: + return QUERY_SUPERUSER[DEFAULT] + elif name == SERVER_VERSION: + return QUERY_SHOW_VERSION[DEFAULT] + elif name == AUTOVACUUM: + return QUERY_AUTOVACUUM[DEFAULT] + elif name == WAL: + if version < 100000: + return QUERY_WAL[V96] + return QUERY_WAL[DEFAULT] + elif name == ARCHIVE: + if version < 100000: + return QUERY_ARCHIVE[V96] + return QUERY_ARCHIVE[DEFAULT] + elif name == STANDBY_DELTA: + if version < 100000: + return QUERY_STANDBY_DELTA[V96] + return QUERY_STANDBY_DELTA[DEFAULT] + elif name == REPSLOT_FILES: + if version < 110000: + return QUERY_REPSLOT_FILES[V10] + return QUERY_REPSLOT_FILES[DEFAULT] + elif name == DIFF_LSN: + if version < 100000: + return QUERY_DIFF_LSN[V96] + return QUERY_DIFF_LSN[DEFAULT] + + raise ValueError('unknown query') + ORDER = [ 'db_stat_temp_files', @@ -403,7 +636,7 @@ CHARTS = { ] }, 'db_stat_temp_bytes': { - 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', + 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'], 'lines': [ ['temp_bytes', 'size', 'incremental', 1, 1024] @@ -417,7 +650,7 @@ CHARTS = { ] }, 'database_size': { - 'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'], + 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'], 'lines': [ ] }, @@ -436,7 +669,7 @@ CHARTS = { ] }, 'index_size': { - 'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'], + 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'], 'lines': [ ['index_size', 'size', 'absolute', 1, 1024 * 1024] ] @@ -448,7 +681,7 @@ CHARTS = { ] }, 'table_size': { - 'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'], + 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 
'line'], 'lines': [ ['table_size', 'size', 'absolute', 1, 1024 * 1024] ] @@ -462,7 +695,7 @@ CHARTS = { ] }, 'wal_writes': { - 'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'], + 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'], 'lines': [ ['wal_writes', 'writes', 'incremental', 1, 1024] ] @@ -483,20 +716,20 @@ CHARTS = { ] }, 'stat_bgwriter_alloc': { - 'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'], + 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'], 'lines': [ ['buffers_alloc', 'alloc', 'incremental', 1, 1024] ] }, 'stat_bgwriter_checkpoint': { - 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', + 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'], 'lines': [ ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024] ] }, 'stat_bgwriter_backend': { - 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', + 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'], 'lines': [ ['buffers_backend', 'backend', 'incremental', 1, 1024] @@ -509,7 +742,7 @@ CHARTS = { ] }, 'stat_bgwriter_bgwriter': { - 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', + 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'], 'lines': [ ['buffers_clean', 'clean', 'incremental', 1, 1024] @@ -533,7 +766,7 @@ CHARTS = { ] }, 'standby_delta': { - 'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'], + 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'], 'lines': [ ['sent_delta', 'sent delta', 'absolute', 1, 1024], ['write_delta', 'write delta', 'absolute', 1, 1024], @@ -554,186 +787,218 @@ CHARTS = { class Service(SimpleService): def __init__(self, configuration=None, name=None): SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER[:] + self.order = list(ORDER) self.definitions = deepcopy(CHARTS) - self.table_stats = configuration.pop('table_stats', False) - self.index_stats = configuration.pop('index_stats', False) - self.database_poll = configuration.pop('database_poll', None) + self.do_table_stats = configuration.pop('table_stats', False) + self.do_index_stats = configuration.pop('index_stats', False) + self.databases_to_poll = configuration.pop('database_poll', None) + self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT) self.configuration = configuration - self.connection = False + self.conn = None self.server_version = None - self.data = dict() - self.locks_zeroed = dict() + self.is_superuser = False + self.alive = False self.databases = list() self.secondaries = list() self.replication_slots = list() - self.queries = QUERY_STATS.copy() - - def _connect(self): - params = dict(user='postgres', - database=None, - password=None, - host=None, - port=5432) - params.update(self.configuration) - - if not self.connection: - try: - self.connection = psycopg2.connect(**params) - self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) - self.connection.set_session(readonly=True) - except OperationalError as error: - return False, 
str(error) - return True, True + self.queries = dict() + self.data = dict() + + def reconnect(self): + return self.connect() + + def connect(self): + if self.conn: + self.conn.close() + self.conn = None + + try: + params = dict( + host=None, + port=DEFAULT_PORT, + database=None, + user=DEFAULT_USER, + password=None, + connect_timeout=DEFAULT_CONNECT_TIMEOUT, + options='-c statement_timeout={0}'.format(self.statement_timeout), + ) + params.update(self.configuration) + + self.conn = psycopg2.connect(**params) + self.conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT) + self.conn.set_session(readonly=True) + except OperationalError as error: + self.error(error) + self.alive = False + else: + self.alive = True + + return self.alive def check(self): if not PSYCOPG2: - self.error('\'python-psycopg2\' module is needed to use postgres.chart.py') + self.error("'python-psycopg2' package is needed to use postgres module") return False - result, error = self._connect() - if not result: - conf = dict((k, (lambda k, v: v if k != 'password' else '*****')(k, v)) - for k, v in self.configuration.items()) - self.error('Failed to connect to %s. Error: %s' % (str(conf), error)) + + if not self.connect(): + self.error('failed to connect to {0}'.format(hide_password(self.configuration))) return False + try: - cursor = self.connection.cursor() - self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES']) - is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER']) - self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY']) - self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION']) - if self.server_version >= 94000: - self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT']) - cursor.close() - - if self.database_poll and isinstance(self.database_poll, str): - self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \ - or self.databases - - self.locks_zeroed = populate_lock_types(self.databases) - self.add_additional_queries_(is_superuser) - self.create_dynamic_charts_() - return True + self.check_queries() except Exception as error: - self.error(str(error)) + self.error(error) return False - def add_additional_queries_(self, is_superuser): + self.populate_queries() + self.create_dynamic_charts() - if self.server_version >= 100000: - wal = 'wal' - lsn = 'lsn' - else: - wal = 'xlog' - lsn = 'location' - self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER'] - self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES'] - self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA'] - - if self.index_stats: - self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS'] - if self.table_stats: - self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS'] - if is_superuser: - self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE'] - if self.server_version >= 90400: - self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL'] - if self.server_version >= 100000: - self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES'] - if self.server_version >= 90400: - self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM'] + return True - def create_dynamic_charts_(self): + def get_data(self): + if not self.alive and not self.reconnect(): + return None - for database_name in self.databases[::-1]: - self.definitions['database_size']['lines'].append( - [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024]) 
- for chart_name in [name for name in self.order if name.startswith('db_stat')]: - add_database_stat_chart_(order=self.order, definitions=self.definitions, - name=chart_name, database_name=database_name) + try: + cursor = self.conn.cursor(cursor_factory=DictCursor) - add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name) + self.data.update(zero_lock_types(self.databases)) - for application_name in self.secondaries[::-1]: - add_replication_delta_chart_( - order=self.order, - definitions=self.definitions, - name='standby_delta', - application_name=application_name) + for query, metrics in self.queries.items(): + self.query_stats(cursor, query, metrics) - for slot_name in self.replication_slots[::-1]: - add_replication_slot_chart_( - order=self.order, - definitions=self.definitions, - name='replication_slot', - slot_name=slot_name) - - def _get_data(self): - result, _ = self._connect() - if result: - cursor = self.connection.cursor(cursor_factory=DictCursor) - try: - self.data.update(self.locks_zeroed) - for query, metrics in self.queries.items(): - self.query_stats_(cursor, query, metrics) - - except OperationalError: - self.connection = False - cursor.close() - return None - else: - cursor.close() - return self.data - else: + except OperationalError: + self.alive = False return None - def query_stats_(self, cursor, query, metrics): + cursor.close() + + return self.data + + def query_stats(self, cursor, query, metrics): cursor.execute(query, dict(databases=tuple(self.databases))) + for row in cursor: for metric in metrics: + # databases if 'database_name' in row: dimension_id = '_'.join([row['database_name'], metric]) + # secondaries elif 'application_name' in row: dimension_id = '_'.join([row['application_name'], metric]) + # replication slots elif 'slot_name' in row: dimension_id = '_'.join([row['slot_name'], metric]) + # other else: dimension_id = metric + if metric in row: if row[metric] is not None: self.data[dimension_id] = int(row[metric]) elif 'locks_count' in row: - self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0 + if metric == row['mode']: + self.data[dimension_id] = row['locks_count'] + def check_queries(self): + cursor = self.conn.cursor() -def discover_databases_(cursor, query): - cursor.execute(query) - result = list() - for db in [database[0] for database in cursor]: - if db not in result: - result.append(db) - return result + self.server_version = detect_server_version(cursor, query_factory(SERVER_VERSION)) + self.debug('server version: {0}'.format(self.server_version)) + self.is_superuser = check_if_superuser(cursor, query_factory(IF_SUPERUSER)) + self.debug('superuser: {0}'.format(self.is_superuser)) -def discover_secondaries_(cursor, query): - cursor.execute(query) - result = list() - for sc in [standby[0] for standby in cursor]: - if sc not in result: - result.append(sc) - return result + self.databases = discover(cursor, query_factory(DATABASES)) + self.debug('discovered databases {0}'.format(self.databases)) + if self.databases_to_poll: + to_poll = self.databases_to_poll.split() + self.databases = [db for db in self.databases if db in to_poll] or self.databases + + self.secondaries = discover(cursor, query_factory(STANDBY)) + self.debug('discovered secondaries: {0}'.format(self.secondaries)) + + if self.server_version >= 94000: + self.replication_slots = discover(cursor, query_factory(REPLICATION_SLOT)) + self.debug('discovered replication slots: {0}'.format(self.replication_slots)) + + 
cursor.close() + + def populate_queries(self): + self.queries[query_factory(DATABASE)] = METRICS[DATABASE] + self.queries[query_factory(BACKENDS)] = METRICS[BACKENDS] + self.queries[query_factory(LOCKS)] = METRICS[LOCKS] + self.queries[query_factory(BGWRITER)] = METRICS[BGWRITER] + self.queries[query_factory(DIFF_LSN, self.server_version)] = METRICS[WAL_WRITES] + self.queries[query_factory(STANDBY_DELTA, self.server_version)] = METRICS[STANDBY_DELTA] + + if self.do_index_stats: + self.queries[query_factory(INDEX_STATS)] = METRICS[INDEX_STATS] + if self.do_table_stats: + self.queries[query_factory(TABLE_STATS)] = METRICS[TABLE_STATS] + + if self.is_superuser: + self.queries[query_factory(ARCHIVE, self.server_version)] = METRICS[ARCHIVE] + + if self.server_version >= 90400: + self.queries[query_factory(WAL, self.server_version)] = METRICS[WAL] + + if self.server_version >= 100000: + self.queries[query_factory(REPSLOT_FILES, self.server_version)] = METRICS[REPSLOT_FILES] + + if self.server_version >= 90400: + self.queries[query_factory(AUTOVACUUM)] = METRICS[AUTOVACUUM] + + def create_dynamic_charts(self): + for database_name in self.databases[::-1]: + dim = [ + database_name + '_size', + database_name, + 'absolute', + 1, + 1024 * 1024, + ] + self.definitions['database_size']['lines'].append(dim) + for chart_name in [name for name in self.order if name.startswith('db_stat')]: + add_database_stat_chart( + order=self.order, + definitions=self.definitions, + name=chart_name, + database_name=database_name, + ) + add_database_lock_chart( + order=self.order, + definitions=self.definitions, + database_name=database_name, + ) + + for application_name in self.secondaries[::-1]: + add_replication_delta_chart( + order=self.order, + definitions=self.definitions, + name='standby_delta', + application_name=application_name, + ) + + for slot_name in self.replication_slots[::-1]: + add_replication_slot_chart( + order=self.order, + definitions=self.definitions, + name='replication_slot', + slot_name=slot_name, + ) -def discover_replication_slots_(cursor, query): +def discover(cursor, query): cursor.execute(query) result = list() - for slot in [replication_slot[0] for replication_slot in cursor]: - if slot not in result: - result.append(slot) + for v in [value[0] for value in cursor]: + if v not in result: + result.append(v) return result -def check_if_superuser_(cursor, query): +def check_if_superuser(cursor, query): cursor.execute(query) return cursor.fetchone()[0] @@ -743,7 +1008,7 @@ def detect_server_version(cursor, query): return int(cursor.fetchone()[0]) -def populate_lock_types(databases): +def zero_lock_types(databases): result = dict() for database in databases: for lock_type in METRICS['LOCKS']: @@ -753,7 +1018,11 @@ def populate_lock_types(databases): return result -def add_database_lock_chart_(order, definitions, database_name): +def hide_password(config): + return dict((k, v if k != 'password' else '*****') for k, v in config.items()) + + +def add_database_lock_chart(order, definitions, database_name): def create_lines(database): result = list() for lock_type in METRICS['LOCKS']: @@ -770,7 +1039,7 @@ def add_database_lock_chart_(order, definitions, database_name): } -def add_database_stat_chart_(order, definitions, name, database_name): +def add_database_stat_chart(order, definitions, name, database_name): def create_lines(database, lines): result = list() for line in lines: @@ -787,7 +1056,7 @@ def add_database_stat_chart_(order, definitions, name, database_name): 'lines': 
create_lines(database_name, chart_template['lines'])} -def add_replication_delta_chart_(order, definitions, name, application_name): +def add_replication_delta_chart(order, definitions, name, application_name): def create_lines(standby, lines): result = list() for line in lines: @@ -799,13 +1068,13 @@ def add_replication_delta_chart_(order, definitions, name, application_name): chart_name = '_'.join([application_name, name]) position = order.index('database_size') order.insert(position, chart_name) - name, title, units, family, context, chart_type = chart_template['options'] + name, title, units, _, context, chart_type = chart_template['options'] definitions[chart_name] = { 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type], 'lines': create_lines(application_name, chart_template['lines'])} -def add_replication_slot_chart_(order, definitions, name, slot_name): +def add_replication_slot_chart(order, definitions, name, slot_name): def create_lines(slot, lines): result = list() for line in lines: @@ -817,7 +1086,7 @@ def add_replication_slot_chart_(order, definitions, name, slot_name): chart_name = '_'.join([slot_name, name]) position = order.index('database_size') order.insert(position, chart_name) - name, title, units, family, context, chart_type = chart_template['options'] + name, title, units, _, context, chart_type = chart_template['options'] definitions[chart_name] = { 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type], 'lines': create_lines(slot_name, chart_template['lines'])} diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf index b69ca3717..cde698f3c 100644 --- a/collectors/python.d.plugin/postgres/postgres.conf +++ b/collectors/python.d.plugin/postgres/postgres.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,18 +56,20 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # A single connection is required in order to pull statistics. 
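#
# For illustration only (the job name and credentials below are hypothetical;
# the full list of connection options follows):
#
# local:
#   name     : 'local'
#   host     : 'localhost'
#   port     : 5432
#   database : 'postgres'
#   user     : 'netdata'
#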
# # Connections can be configured with the following options: # -# database : 'example_db_name' -# user : 'example_user' -# password : 'example_pass' -# host : 'localhost' -# port : 5432 +# database : 'example_db_name' +# user : 'example_user' +# password : 'example_pass' +# host : 'localhost' +# port : 5432 +# connect_timeout : 2 # in seconds, default is 2 +# statement_timeout : 2000 # in ms, default is 2000 # # Additionally, the following options allow selective disabling of charts # diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md index 3c4b145e0..61aa5f6b7 100644 --- a/collectors/python.d.plugin/powerdns/README.md +++ b/collectors/python.d.plugin/powerdns/README.md @@ -75,3 +75,5 @@ local: ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py index 4264621b2..7ed1554f6 100644 --- a/collectors/python.d.plugin/powerdns/powerdns.chart.py +++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py @@ -8,11 +8,14 @@ from json import loads from bases.FrameworkServices.UrlService import UrlService -priority = 60000 -retries = 60 -# update_every = 3 -ORDER = ['questions', 'cache_usage', 'cache_size', 'latency'] +ORDER = [ + 'questions', + 'cache_usage', + 'cache_size', + 'latency', +] + CHARTS = { 'questions': { 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'], diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf index ca6200df1..559bf175e 100644 --- a/collectors/python.d.plugin/powerdns/powerdns.conf +++ b/collectors/python.d.plugin/powerdns/powerdns.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, powerdns also supports the following: diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md index 02388276e..6e5a2127f 100644 --- a/collectors/python.d.plugin/proxysql/README.md +++ b/collectors/python.d.plugin/proxysql/README.md @@ -60,3 +60,5 @@ tcpipv4: If no configuration is given, module will fail to run.
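Since a configuration is required, a minimal job definition might look like the one below (a sketch only: the credentials and admin port are placeholders, and the option names are the standard MySQLService connection options used by this module):

```
tcpipv4:
  user : 'stats'
  pass : 'stats'
  host : '127.0.0.1'
  port : '6032'
```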
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py index f7e3d49f9..c97147486 100644 --- a/collectors/python.d.plugin/proxysql/proxysql.chart.py +++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py @@ -5,11 +5,6 @@ from bases.FrameworkServices.MySQLService import MySQLService -# default module values (can be overridden per job in `config`) -# update_every = 3 -priority = 60000 -retries = 60 - def query(table, *params): return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params)) @@ -133,8 +128,8 @@ CHARTS = { 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth', 'proxysql.pool_overall_net', 'area'], 'lines': [ - ['bytes_data_recv', 'in', 'incremental', 8, 1024], - ['bytes_data_sent', 'out', 'incremental', -8, 1024] + ['bytes_data_recv', 'in', 'incremental', 8, 1000], + ['bytes_data_sent', 'out', 'incremental', -8, 1000] ] }, 'questions': { @@ -156,7 +151,7 @@ CHARTS = { ] }, 'pool_latency': { - 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'], + 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'], 'lines': [] }, 'connections': { @@ -194,7 +189,7 @@ CHARTS = { 'lines': [] }, 'commands_duration': { - 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'], + 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'], 'lines': [] } } diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf index d29c2e5be..3c503a895 100644 --- a/collectors/python.d.plugin/proxysql/proxysql.conf +++ b/collectors/python.d.plugin/proxysql/proxysql.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, proxysql also supports the following: diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md index 8304c831e..b97eb70c5 100644 --- a/collectors/python.d.plugin/puppet/README.md +++ b/collectors/python.d.plugin/puppet/README.md @@ -26,16 +26,13 @@ puppetdb: tls_cert_file: /path/to/client.crt tls_key_file: /path/to/client.key autodetection_retry: 1 - retries: 3600 puppetserver: url: 'https://fqdn.example.com:8140' autodetection_retry: 1 - retries: 3600 ``` -When no configuration is given then `https://fqdn.example.com:8140` is -tried without any retries. +When no configuration is given, module uses `https://fqdn.example.com:8140`. ### notes @@ -46,3 +43,5 @@ tried without any retries. to default PuppetDB configuration though. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py index 5c8e48bd9..30e219da4 100644 --- a/collectors/python.d.plugin/puppet/puppet.chart.py +++ b/collectors/python.d.plugin/puppet/puppet.chart.py @@ -11,29 +11,31 @@ # and tls_cert_file options then. # -from bases.FrameworkServices.UrlService import UrlService -from json import loads import socket +from json import loads + +from bases.FrameworkServices.UrlService import UrlService + update_every = 5 -priority = 60000 -# very long clojure-based service startup time -retries = 180 -MB = 1048576 + +MiB = 1 << 20 CPU_SCALE = 1000 + ORDER = [ 'jvm_heap', 'jvm_nonheap', 'cpu', 'fd_open', ] + CHARTS = { 'jvm_heap': { - 'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'], + 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'], 'lines': [ - ['jvm_heap_committed', 'committed', 'absolute', 1, MB], - ['jvm_heap_used', 'used', 'absolute', 1, MB], + ['jvm_heap_committed', 'committed', 'absolute', 1, MiB], + ['jvm_heap_used', 'used', 'absolute', 1, MiB], ], 'variables': [ ['jvm_heap_max'], @@ -41,10 +43,10 @@ CHARTS = { ], }, 'jvm_nonheap': { - 'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'], + 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'], 'lines': [ - ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB], - ['jvm_nonheap_used', 'used', 'absolute', 1, MB], + ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB], + ['jvm_nonheap_used', 'used', 'absolute', 1, MiB], ], 'variables': [ ['jvm_nonheap_max'], @@ -73,9 +75,9 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.url = 'https://{0}:8140'.format(socket.getfqdn()) self.order = ORDER self.definitions = CHARTS + self.url = 'https://{0}:8140'.format(socket.getfqdn()) def _get_data(self): # NOTE: there are several ways to retrieve data diff --git a/collectors/python.d.plugin/puppet/puppet.conf 
b/collectors/python.d.plugin/puppet/puppet.conf index 991bfabed..ff5c3d020 100644 --- a/collectors/python.d.plugin/puppet/puppet.conf +++ b/collectors/python.d.plugin/puppet/puppet.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # These configuration comes from UrlService base: @@ -89,10 +87,8 @@ # tls_cert_file: /path/to/client.crt # tls_key_file: /path/to/client.key # autodetection_retry: 1 -# retries: 3600 # # puppetserver: # url: 'https://fqdn.example.com:8140' # autodetection_retry: 1 -# retries: 3600 # diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf index 40c8c033f..72236209b 100644 --- a/collectors/python.d.plugin/python.d.conf +++ b/collectors/python.d.plugin/python.d.conf @@ -28,6 +28,7 @@ gc_interval: 300 # apache: yes # apache_cache has been replaced by web_log +# adaptec_raid: yes apache_cache: no # beanstalk: yes # bind_rndc: yes @@ -39,6 +40,7 @@ chrony: no # cpuidle: yes # dns_query_time: yes # dnsdist: yes +# dockerd: yes # dovecot: yes # elasticsearch: yes @@ -54,6 +56,7 @@ go_expvar: no gunicorn_log: no # haproxy: yes # hddtemp: yes +# httpcheck: yes # icecast: yes # ipfs: yes # isc_dhcpd: yes @@ -61,6 +64,7 @@ gunicorn_log: no # litespeed: yes logind: no # mdstat: yes +# megacli: yes # memcached: yes # mongodb: yes # monit: yes @@ -76,6 +80,7 @@ nginx_log: no # openldap: yes # ovpn_status_log: yes # phpfpm: yes +# portcheck: yes # postfix: yes # postgres: yes # powerdns: yes @@ -91,6 +96,7 @@ nginx_log: no # spigotmc: yes # springboot: yes # squid: yes +# traefik: yes # tomcat: yes # tor: yes unbound: no diff --git a/collectors/python.d.plugin/python.d.plugin b/collectors/python.d.plugin/python.d.plugin deleted file mode 100644 index efff22734..000000000 --- a/collectors/python.d.plugin/python.d.plugin +++ /dev/null @@ -1,427 +0,0 @@ -#!/usr/bin/env bash -'''':; exec "$(command -v python || command -v python3 || command -v python2 || -echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # ''' - -# -*- coding: utf-8 -*- -# Description: -# Author: Pawel Krupa (paulfantom) -# Author: Ilya Mashchenko (l2isbad) -# SPDX-License-Identifier: GPL-3.0-or-later - -import gc -import os -import sys -import threading - -from re import sub -from sys import version_info, argv -from time import sleep - -GC_RUN = True -GC_COLLECT_EVERY = 300 - -PY_VERSION = version_info[:2] - -USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata') -STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d') - -PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 
'python.d') -PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d') - - -PLUGINS_DIR = os.path.abspath(os.getenv( - 'NETDATA_PLUGINS_DIR', - os.path.dirname(__file__)) + '/../python.d') - - -PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules') - -sys.path.append(PYTHON_MODULES_DIR) - -from bases.loaders import ModuleAndConfigLoader # noqa: E402 -from bases.loggers import PythonDLogger # noqa: E402 -from bases.collection import setdefault_values, run_and_exit # noqa: E402 - -try: - from collections import OrderedDict -except ImportError: - from third_party.ordereddict import OrderedDict - -BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), - 'retries': 60, - 'priority': 60000, - 'autodetection_retry': 0, - 'chart_cleanup': 10, - 'name': str()} - - -MODULE_EXTENSION = '.chart.py' -OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq'] - - -def module_ok(m): - return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES - - -ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)] - - -def parse_cmd(): - debug = 'debug' in argv[1:] - trace = 'trace' in argv[1:] - override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False) - modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES] - return debug, trace, override_update_every, modules or ALL_MODULES - - -def multi_job_check(config): - return next((True for key in config if isinstance(config[key], dict)), False) - - -class RawModule: - def __init__(self, name, path, explicitly_enabled=True): - self.name = name - self.path = path - self.explicitly_enabled = explicitly_enabled - - -class Job(object): - def __init__(self, initialized_job, job_id): - """ - :param initialized_job: instance of - :param job_id: - """ - self.job = initialized_job - self.id = job_id # key in Modules.jobs() - self.module_name = self.job.__module__ # used in Plugin.delete_job() - self.recheck_every = self.job.configuration.pop('autodetection_retry') - self.checked = False # used in Plugin.check_job() - self.created = False # used in Plugin.create_job_charts() - if self.job.update_every < int(OVERRIDE_UPDATE_EVERY): - self.job.update_every = int(OVERRIDE_UPDATE_EVERY) - - def __getattr__(self, item): - return getattr(self.job, item) - - def __repr__(self): - return self.job.__repr__() - - def is_dead(self): - return bool(self.ident) and not self.is_alive() - - def not_launched(self): - return not bool(self.ident) - - def is_autodetect(self): - return self.recheck_every - - -class Module(object): - def __init__(self, service, config): - """ - :param service: - :param config: - """ - self.service = service - self.name = service.__name__ - self.config = self.jobs_configurations_builder(config) - self.jobs = OrderedDict() - self.counter = 1 - - self.initialize_jobs() - - def __repr__(self): - return "".format(name=self.name) - - def __iter__(self): - return iter(OrderedDict(self.jobs).values()) - - def __getitem__(self, item): - return self.jobs[item] - - def __delitem__(self, key): - del self.jobs[key] - - def __len__(self): - return len(self.jobs) - - def __bool__(self): - return bool(self.jobs) - - def __nonzero__(self): - return self.__bool__() - - def jobs_configurations_builder(self, config): - """ - :param config: - :return: - """ - counter = 0 - job_base_config = dict() - - for attr in BASE_CONFIG: - job_base_config[attr] = config.pop(attr, getattr(self.service, attr, 
BASE_CONFIG[attr])) - - if not config: - config = {str(): dict()} - elif not multi_job_check(config): - config = {str(): config} - - for job_name in config: - if not isinstance(config[job_name], dict): - continue - - job_config = setdefault_values(config[job_name], base_dict=job_base_config) - job_name = sub(r'\s+', '_', job_name) - config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name']) - counter += 1 - job_id = 'job' + str(counter).zfill(3) - - yield job_id, job_name, job_config - - def initialize_jobs(self): - """ - :return: - """ - for job_id, job_name, job_config in self.config: - job_config['job_name'] = job_name - job_config['override_name'] = job_config.pop('name') - - try: - initialized_job = self.service.Service(configuration=job_config) - except Exception as error: - Logger.error("job initialization: '{module_name} {job_name}' " - "=> ['FAILED'] ({error})".format(module_name=self.name, - job_name=job_name, - error=error)) - continue - else: - Logger.debug("job initialization: '{module_name} {job_name}' " - "=> ['OK']".format(module_name=self.name, - job_name=job_name or self.name)) - self.jobs[job_id] = Job(initialized_job=initialized_job, - job_id=job_id) - del self.config - del self.service - - -class Plugin(object): - def __init__(self): - self.loader = ModuleAndConfigLoader() - self.modules = OrderedDict() - self.sleep_time = 1 - self.runs_counter = 0 - - user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf') - stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf') - - Logger.debug("loading '{0}'".format(user_config)) - self.config, error = self.loader.load_config_from_file(user_config) - - if error: - Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error)) - Logger.debug("loading '{0}'".format(stock_config)) - self.config, error = self.loader.load_config_from_file(stock_config) - if error: - Logger.error("cannot load '{0}': {1}".format(stock_config, error)) - - self.do_gc = self.config.get("gc_run", GC_RUN) - self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY) - - if not self.config.get('enabled', True): - run_and_exit(Logger.info)('DISABLED in configuration file.') - - self.load_and_initialize_modules() - if not self.modules: - run_and_exit(Logger.info)('No modules to run. 
Exit...') - - def __iter__(self): - return iter(OrderedDict(self.modules).values()) - - @property - def jobs(self): - return (job for mod in self for job in mod) - - @property - def dead_jobs(self): - return (job for job in self.jobs if job.is_dead()) - - @property - def autodetect_jobs(self): - return [job for job in self.jobs if job.not_launched()] - - def enabled_modules(self): - for mod in MODULES_TO_RUN: - mod_name = mod[:-len(MODULE_EXTENSION)] - mod_path = os.path.join(PLUGINS_DIR, mod) - if any( - [ - self.config.get('default_run', True) and self.config.get(mod_name, True), - (not self.config.get('default_run')) and self.config.get(mod_name), - ] - ): - yield RawModule( - name=mod_name, - path=mod_path, - explicitly_enabled=self.config.get(mod_name), - ) - - def load_and_initialize_modules(self): - for mod in self.enabled_modules(): - - # Load module from file ------------------------------------------------------------ - loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path) - log = Logger.error if error else Logger.debug - log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK', - module_name=mod.name)) - if error: - Logger.error("load source error : {0}".format(error)) - continue - - # Load module config from file ------------------------------------------------------ - user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf') - stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf') - - Logger.debug("loading '{0}'".format(user_config)) - loaded_config, error = self.loader.load_config_from_file(user_config) - if error: - Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error)) - Logger.debug("loading '{0}'".format(stock_config)) - loaded_config, error = self.loader.load_config_from_file(stock_config) - - if error: - Logger.error("cannot load '{0}': {1}".format(stock_config, error)) - - # Skip disabled modules - if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled: - Logger.info("module '{0}' disabled by default".format(loaded_module.__name__)) - continue - - # Module initialization --------------------------------------------------- - - initialized_module = Module(service=loaded_module, config=loaded_config) - Logger.debug("module status: '{module_name}' => [{status}] " - "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED', - module_name=initialized_module.name, - jobs_number=len(initialized_module))) - if initialized_module: - self.modules[initialized_module.name] = initialized_module - - @staticmethod - def check_job(job): - """ - :param job: - :return: - """ - try: - check_ok = bool(job.check()) - except Exception as error: - job.error('check() unhandled exception: {error}'.format(error=error)) - return None - else: - return check_ok - - @staticmethod - def create_job_charts(job): - """ - :param job: - :return: - """ - try: - create_ok = job.create() - except Exception as error: - job.error('create() unhandled exception: {error}'.format(error=error)) - return False - else: - return create_ok - - def delete_job(self, job): - """ - :param job: - :return: - """ - del self.modules[job.module_name][job.id] - - def run_check(self): - checked = list() - for job in self.jobs: - if job.name in checked: - job.info('check() => [DROPPED] (already served by another job)') - self.delete_job(job) - continue - ok = self.check_job(job) - if ok: - job.info('check() => [OK]') - checked.append(job.name) - 
job.checked = True - continue - if not job.is_autodetect() or ok is None: - job.info('check() => [FAILED]') - self.delete_job(job) - else: - job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every)) - - def run_create(self): - for job in self.jobs: - if not job.checked: - # skip autodetection_retry jobs - continue - ok = self.create_job_charts(job) - if ok: - job.debug('create() => [OK] (charts: {0})'.format(len(job.charts))) - job.created = True - continue - job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts))) - self.delete_job(job) - - def start(self): - self.run_check() - self.run_create() - for job in self.jobs: - if job.created: - job.start() - - while True: - if threading.active_count() <= 1 and not self.autodetect_jobs: - run_and_exit(Logger.info)('FINISHED') - - sleep(self.sleep_time) - self.cleanup() - self.autodetect_retry() - - # FIXME: https://github.com/netdata/netdata/issues/3817 - if self.do_gc and self.runs_counter % self.gc_interval == 0: - v = gc.collect() - Logger.debug("GC full collection run result: {0}".format(v)) - - def cleanup(self): - for job in self.dead_jobs: - self.delete_job(job) - for mod in self: - if not mod: - del self.modules[mod.name] - - def autodetect_retry(self): - self.runs_counter += self.sleep_time - for job in self.autodetect_jobs: - if self.runs_counter % job.recheck_every == 0: - checked = self.check_job(job) - if checked: - created = self.create_job_charts(job) - if not created: - self.delete_job(job) - continue - job.start() - - -if __name__ == '__main__': - DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd() - Logger = PythonDLogger() - if DEBUG: - Logger.logger.severity = 'DEBUG' - if TRACE: - Logger.log_traceback = True - Logger.info('Using python {version}'.format(version=PY_VERSION[0])) - - plugin = Plugin() - plugin.start() diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in old mode 100755 new mode 100644 index 8b55ad41b..6521fed94 --- a/collectors/python.d.plugin/python.d.plugin.in +++ b/collectors/python.d.plugin/python.d.plugin.in @@ -48,15 +48,15 @@ except ImportError: from third_party.ordereddict import OrderedDict BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), - 'retries': 60, 'priority': 60000, 'autodetection_retry': 0, 'chart_cleanup': 10, + 'penalty': True, 'name': str()} MODULE_EXTENSION = '.chart.py' -OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq'] +OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq', 'cpuidle', 'mdstat', 'linux_power_supply'] def module_ok(m): diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py index 53807e2c4..9a694aa82 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py @@ -131,20 +131,22 @@ class MySQLService(SimpleService): raw_data = dict() queries = dict(self.queries) try: - with self.__connection as cursor: - for name, query in queries.items(): - try: - cursor.execute(query) - except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error: - if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)): - raise RuntimeError - self.error('Removed query: {name}[{query}]. 
Error: error'.format(name=name, - query=query, - error=error)) - self.queries.pop(name) - continue - else: - raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall() + cursor = self.__connection.cursor() + for name, query in queries.items(): + try: + cursor.execute(query) + except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error: + if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)): + cursor.close() + raise RuntimeError + self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name, + query=query, + error=error)) + self.queries.pop(name) + continue + else: + raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall() + cursor.close() self.__connection.commit() except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError): self.__connection.close() diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py index dd53fbc14..c7ab7f244 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later from threading import Thread -from time import sleep +from time import sleep, time from third_party.monotonic import monotonic @@ -17,25 +17,42 @@ RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \ 'SET run_time = {elapsed}\n' \ 'END\n' +PENALTY_EVERY = 5 +MAX_PENALTY = 10 * 60 # 10 minutes + class RuntimeCounters: def __init__(self, configuration): """ :param configuration: """ - self.FREQ = int(configuration.pop('update_every')) - self.START_RUN = 0 - self.NEXT_RUN = 0 - self.PREV_UPDATE = 0 - self.SINCE_UPDATE = 0 - self.ELAPSED = 0 - self.RETRIES = 0 - self.RETRIES_MAX = configuration.pop('retries') - self.PENALTY = 0 - self.RUNS = 1 + self.update_every = int(configuration.pop('update_every')) + self.do_penalty = configuration.pop('penalty') + + self.start_mono = 0 + self.start_real = 0 + self.retries = 0 + self.penalty = 0 + self.elapsed = 0 + self.prev_update = 0 + + self.runs = 1 - def is_sleep_time(self): - return self.START_RUN < self.NEXT_RUN + def calc_next(self): + self.start_mono = monotonic() + return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty + + def sleep_until_next(self): + next_time = self.calc_next() + while self.start_mono < next_time: + sleep(next_time - self.start_mono) + self.start_mono = monotonic() + self.start_real = time() + + def handle_retries(self): + self.retries += 1 + if self.do_penalty and self.retries % PENALTY_EVERY == 0: + self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY)) class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object): @@ -83,11 +100,11 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec @property def runs_counter(self): - return self._runtime_counters.RUNS + return self._runtime_counters.runs @property def update_every(self): - return self._runtime_counters.FREQ + return self._runtime_counters.update_every @update_every.setter def update_every(self, value): """ :param value: :return: """ - self._runtime_counters.FREQ = value + self._runtime_counters.update_every = value def
get_update_every(self): return self.update_every @@ -163,41 +180,36 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec :return: None """ job = self._runtime_counters - self.debug('started, update frequency: {freq}, ' - 'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES)) + self.debug('started, update frequency: {freq}'.format(freq=job.update_every)) while True: - job.START_RUN = monotonic() - - job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY + job.sleep_until_next() - self.sleep_until_next_run() - - if job.PREV_UPDATE: - job.SINCE_UPDATE = int((job.START_RUN - job.PREV_UPDATE) * 1e6) + since = 0 + if job.prev_update: + since = int((job.start_real - job.prev_update) * 1e6) try: - updated = self.update(interval=job.SINCE_UPDATE) + updated = self.update(interval=since) except Exception as error: self.error('update() unhandled exception: {error}'.format(error=error)) updated = False - job.RUNS += 1 + job.runs += 1 if not updated: - if not self.manage_retries(): - return + job.handle_retries() else: - job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3) - job.PREV_UPDATE = job.START_RUN - job.RETRIES, job.PENALTY = 0, 0 + job.elapsed = int((monotonic() - job.start_mono) * 1e3) + job.prev_update = job.start_real + job.retries, job.penalty = 0, 0 safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name, - since_last=job.SINCE_UPDATE, - elapsed=job.ELAPSED)) - self.debug('update => [{status}] (elapsed time: {elapsed}, ' - 'retries left: {retries})'.format(status='OK' if updated else 'FAILED', - elapsed=job.ELAPSED if updated else '-', - retries=job.RETRIES_MAX - job.RETRIES)) + since_last=since, + elapsed=job.elapsed)) + self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format( + status='OK' if updated else 'FAILED', + elapsed=job.elapsed if updated else '-', + retries=job.retries)) def update(self, interval): """ @@ -233,27 +245,6 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec return updated - def manage_retries(self): - rc = self._runtime_counters - rc.RETRIES += 1 - if rc.RETRIES % 5 == 0: - rc.PENALTY = int(rc.RETRIES * self.update_every / 2) - if rc.RETRIES >= rc.RETRIES_MAX: - self.error('stopped after {0} data collection failures in a row'.format(rc.RETRIES_MAX)) - return False - return True - - def sleep_until_next_run(self): - job = self._runtime_counters - - # sleep() is interruptable - while job.is_sleep_time(): - sleep_time = job.NEXT_RUN - job.START_RUN - self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time, - freq=job.FREQ + job.PENALTY)) - sleep(sleep_time) - job.START_RUN = monotonic() - def get_data(self): return self._get_data() diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py index e85455307..f5e6380b8 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py @@ -75,9 +75,11 @@ class SocketService(SimpleService): keyfile=self.key, certfile=self.cert, server_side=False, - cert_reqs=ssl.CERT_NONE) + cert_reqs=ssl.CERT_NONE, + ssl_version=ssl.PROTOCOL_TLS, + ) except (socket.error, ssl.SSLError) as error: - self.error('Failed to wrap socket.') + self.error('failed to wrap socket : {0}'.format(error)) 
self._disconnect() self.__socket_config = None return False @@ -169,8 +171,8 @@ class SocketService(SimpleService): self.debug('closing socket') self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all self._sock.close() - except Exception: - pass + except Exception as error: + self.error(error) self._sock = None def _send(self, request=None): diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py index 856f38851..011efff9e 100644 --- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py +++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py @@ -26,6 +26,7 @@ class UrlService(SimpleService): self.method = self.configuration.get('method', 'GET') self.header = self.configuration.get('header') self.request_timeout = self.configuration.get('timeout', 1) + self.respect_retry_after_header = self.configuration.get('respect_retry_after_header') self.tls_verify = self.configuration.get('tls_verify') self.tls_ca_file = self.configuration.get('tls_ca_file') self.tls_key_file = self.configuration.get('tls_key_file') @@ -111,12 +112,18 @@ class UrlService(SimpleService): """ url = url or self.url manager = manager or self._manager - response = manager.request(method=self.method, - url=url, - timeout=self.request_timeout, - retries=retries, - headers=manager.headers, - redirect=redirect) + retry = urllib3.Retry(retries) + if hasattr(retry, 'respect_retry_after_header'): + retry.respect_retry_after_header = bool(self.respect_retry_after_header) + + response = manager.request( + method=self.method, + url=url, + timeout=self.request_timeout, + retries=retry, + headers=manager.headers, + redirect=redirect, + ) if isinstance(response.data, str): return response.status, response.data return response.status, response.data.decode() diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py index 2963739ec..0a0719056 100644 --- a/collectors/python.d.plugin/python_modules/bases/charts.py +++ b/collectors/python.d.plugin/python_modules/bases/charts.py @@ -45,7 +45,7 @@ def create_runtime_chart(func): ok = func(*args, **kwargs) if ok: safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name, - update_every=self._runtime_counters.FREQ)) + update_every=self._runtime_counters.update_every)) return ok return wrapper diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py index 39be77a79..098294d3e 100644 --- a/collectors/python.d.plugin/python_modules/bases/loggers.py +++ b/collectors/python.d.plugin/python_modules/bases/loggers.py @@ -34,7 +34,7 @@ def limiter(log_max_count=30, allowed_in_seconds=60): def on_decorator(func): def on_call(*args): - current_time = args[0]._runtime_counters.START_RUN + current_time = args[0]._runtime_counters.start_mono lc = args[0]._logger_counters if lc.logged and lc.logged % log_max_count == 0: diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md index 22d367c4d..4ac606057 100644 --- a/collectors/python.d.plugin/rabbitmq/README.md +++ b/collectors/python.d.plugin/rabbitmq/README.md @@ -54,3 +54,5 @@ socket: When no configuration file is found, module tries to connect to: `localhost:15672`. 
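If the defaults need to be overridden, a job definition might look like this (an illustrative sketch: the credentials are placeholders, while the host and port mirror the defaults visible in the collector code below):

```
local:
  host : '127.0.0.1'
  port : 15672
  user : 'guest'
  pass : 'guest'
```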
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py index 8298b4032..a8f72592f 100644 --- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py +++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py @@ -3,23 +3,12 @@ # Author: l2isbad # SPDX-License-Identifier: GPL-3.0-or-later -from collections import namedtuple from json import loads -from socket import gethostbyname, gaierror -from threading import Thread -try: - from queue import Queue -except ImportError: - from Queue import Queue from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 60 - -METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) +API_NODE = 'api/nodes' +API_OVERVIEW = 'api/overview' NODE_STATS = [ 'fd_used', @@ -64,15 +53,15 @@ CHARTS = { ] }, 'memory': { - 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'], + 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'], 'lines': [ - ['mem_used', 'used', 'absolute', 1, 1024 << 10] + ['mem_used', 'used', 'absolute', 1, 1 << 20] ] }, 'disk_space': { - 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'], + 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'], 'lines': [ - ['disk_free', 'free', 'absolute', 1, 1024 ** 3] + ['disk_free', 'free', 'absolute', 1, 1 << 30] ] }, 'socket_descriptors': { @@ -111,7 +100,7 @@ CHARTS = { ] }, 'message_rates': { - 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'], + 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'line'], 'lines': [ ['message_stats_ack', 'ack', 'incremental'], ['message_stats_redeliver', 'redeliver', 'incremental'], @@ -127,74 +116,62 @@ class Service(UrlService): UrlService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - self.host = self.configuration.get('host', '127.0.0.1') - self.port = self.configuration.get('port', 15672) - self.scheme = self.configuration.get('scheme', 'http') + self.url = '{0}://{1}:{2}'.format( + configuration.get('scheme', 'http'), + configuration.get('host', '127.0.0.1'), + configuration.get('port', 15672), + ) + self.node_name = str() - def check(self): - # We can't start if AND not specified - if not (self.host and self.port): - self.error('Host is not defined in the module configuration file') - return False + def _get_data(self): + data = dict() - # Hostname -> ip address - try: - self.host = gethostbyname(self.host) - except gaierror as error: - self.error(str(error)) - return False - - # Add handlers (auth, self signed cert accept) - self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme, - host=self.host, - port=self.port) - # Add methods - api_node = self.url + '/nodes' - api_overview = self.url + '/overview' - self.methods = [METHODS(get_data=self._get_overview_stats, - url=api_node, - stats=NODE_STATS), - METHODS(get_data=self._get_overview_stats, - url=api_overview, - stats=OVERVIEW_STATS)] - return UrlService.check(self) + stats = 
self.get_overview_stats() - def _get_data(self): - threads = list() - queue = Queue() - result = dict() + if not stats: + return None + + data.update(stats) + + stats = self.get_nodes_stats() + + if not stats: + return None + + data.update(stats) - for method in self.methods: - th = Thread(target=method.get_data, - args=(queue, method.url, method.stats)) - th.start() - threads.append(th) + return data or None - for thread in threads: - thread.join() - result.update(queue.get()) + def get_overview_stats(self): + url = '{0}/{1}'.format(self.url, API_OVERVIEW) - return result or None + raw = self._get_raw_data(url) - def _get_overview_stats(self, queue, url, stats): - """ - Format data received from http request - :return: dict - """ + if not raw: + return None - raw_data = self._get_raw_data(url) + data = loads(raw) - if not raw_data: - return queue.put(dict()) - data = loads(raw_data) - data = data[0] if isinstance(data, list) else data + self.node_name = data['node'] - to_netdata = fetch_data(raw_data=data, metrics=stats) - return queue.put(to_netdata) + return fetch_data(raw_data=data, metrics=OVERVIEW_STATS) + + def get_nodes_stats(self): + url = '{0}/{1}/{2}'.format(self.url, API_NODE, self.node_name) + + raw = self._get_raw_data(url) + + if not raw: + return None + + data = loads(raw) + + return fetch_data(raw_data=data, metrics=NODE_STATS) def fetch_data(raw_data, metrics): data = dict() + for metric in metrics: value = raw_data metrics_list = metric.split('.') @@ -204,4 +181,5 @@ def fetch_data(raw_data, metrics): except KeyError: continue data['_'.join(metrics_list)] = value + return data diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf index 3f90da8a2..ae0dbdb75 100644 --- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf +++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, rabbitmq plugin also supports the following: diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md index 8d21df0ca..0bea0376e 100644 --- a/collectors/python.d.plugin/redis/README.md +++ b/collectors/python.d.plugin/redis/README.md @@ -40,3 +40,5 @@ localhost: When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`. 
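An explicit job can override those defaults (a sketch with placeholder values; the option names match the ones the collector reads below, including 'socket' for a UNIX socket and 'pass' for a password):

```
local:
  host : 'localhost'
  port : 6379
  pass : 'redis_password'
```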
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py index 37d55ebfe..9dbb2c164 100644 --- a/collectors/python.d.plugin/redis/redis.chart.py +++ b/collectors/python.d.plugin/redis/redis.chart.py @@ -47,13 +47,13 @@ CHARTS = { ] }, 'hit_rate': { - 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'], + 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'], 'lines': [ ['hit_rate', 'rate', 'absolute'] ] }, 'memory': { - 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'], + 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'], 'lines': [ ['used_memory', 'total', 'absolute', 1, 1024], ['used_memory_lua', 'lua', 'absolute', 1, 1024] @@ -62,8 +62,8 @@ CHARTS = { 'net': { 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'], 'lines': [ - ['total_net_input_bytes', 'in', 'incremental', 8, 1024], - ['total_net_output_bytes', 'out', 'incremental', -8, 1024] + ['total_net_input_bytes', 'in', 'incremental', 8, 1000], + ['total_net_output_bytes', 'out', 'incremental', -8, 1000] ] }, 'keys_redis': { @@ -146,16 +146,13 @@ RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)') class Service(SocketService): def __init__(self, configuration=None, name=None): SocketService.__init__(self, configuration=configuration, name=name) - self._keep_alive = True - self.order = list() self.definitions = dict() - + self._keep_alive = True self.host = self.configuration.get('host', 'localhost') self.port = self.configuration.get('port', 6379) self.unix_socket = self.configuration.get('socket') p = self.configuration.get('pass') - self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None self.request = 'INFO\r\n'.encode() self.bgsave_time = 0 diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf index 6363f6da7..b456d75d3 100644 --- a/collectors/python.d.plugin/redis/redis.conf +++ b/collectors/python.d.plugin/redis/redis.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, redis also supports the following: diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md index 5d357fa49..183c7f733 100644 --- a/collectors/python.d.plugin/rethinkdbs/README.md +++ b/collectors/python.d.plugin/rethinkdbs/README.md @@ -32,3 +32,5 @@ localhost: When no configuration file is found, module tries to connect to `127.0.0.1:28015`. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py index 127e9ad4b..da2f26f4a 100644 --- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py @@ -136,13 +136,11 @@ class Service(SimpleService): SimpleService.__init__(self, configuration=configuration, name=name) self.order = list(ORDER) self.definitions = cluster_charts() - self.host = self.configuration.get('host', '127.0.0.1') self.port = self.configuration.get('port', 28015) self.user = self.configuration.get('user', 'admin') self.password = self.configuration.get('password') self.timeout = self.configuration.get('timeout', 2) - self.conn = None self.alive = True diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf index 73544fc2e..d671acbb0 100644 --- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf +++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
@@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, rethinkdb also supports the following: diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md index e95095c65..a8a58880e 100644 --- a/collectors/python.d.plugin/retroshare/README.md +++ b/collectors/python.d.plugin/retroshare/README.md @@ -1 +1,3 @@ # retroshare + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py index 1d8e35050..feb871fbd 100644 --- a/collectors/python.d.plugin/retroshare/retroshare.chart.py +++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py @@ -7,26 +7,25 @@ import json from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['bandwidth', 'peers', 'dht'] +ORDER = [ + 'bandwidth', + 'peers', + 'dht', +] CHARTS = { 'bandwidth': { - 'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'], + 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'], 'lines': [ - ['bandwidth_up_kb', 'Upload'], + ['bandwidth_up_kb', 'Upload'], ['bandwidth_down_kb', 'Download'] ] }, 'peers': { 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'], 'lines': [ - ['peers_all', 'All friends'], + ['peers_all', 'All friends'], ['peers_connected', 'Connected friends'] ] }, @@ -34,7 +33,7 @@ CHARTS = { 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'], 'lines': [ ['dht_size_all', 'DHT nodes estimated'], - ['dht_size_rs', 'RS nodes estimated'] + ['dht_size_rs', 'RS nodes estimated'] ] } } @@ -43,9 +42,9 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.baseurl = self.configuration.get('url', 'http://localhost:9090') self.order = ORDER self.definitions = CHARTS + self.baseurl = self.configuration.get('url', 'http://localhost:9090') def _get_stats(self): """ diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf index 9c92583f7..3d0af538d 100644 --- a/collectors/python.d.plugin/retroshare/retroshare.conf +++ b/collectors/python.d.plugin/retroshare/retroshare.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. 
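Several hunks here reshape `ORDER` lists into one-entry-per-line form and touch `CHARTS` entries, so the shared schema is worth spelling out once. A minimal illustrative definition (all ids hypothetical) showing what each position means:

```python
# 'options': [name_override, title, units, family, context, chart_type]
# each line: [dimension_id, label, algorithm, multiplier, divisor]
ORDER = [
    'bandwidth',
]

CHARTS = {
    'bandwidth': {
        'options': [None, 'Example Bandwidth', 'kilobits/s', 'example', 'example.bandwidth', 'area'],
        'lines': [
            ['bytes_in', 'in', 'incremental', 8, 1000],     # byte counter -> SI kilobits/s
            ['bytes_out', 'out', 'incremental', -8, 1000],  # negative multiplier plots below the axis
        ]
    }
}
```

The redis `net` hunk above uses exactly this 8/1000 pairing: `incremental` differentiates the raw counter, then the multiplier and divisor convert bytes per second to SI kilobits per second.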
-# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, RetroShare also supports the following: diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md index 44610d373..97f2e3d33 100644 --- a/collectors/python.d.plugin/samba/README.md +++ b/collectors/python.d.plugin/samba/README.md @@ -65,3 +65,5 @@ samba: yes ``` --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py index b2278de9e..ac89c29b0 100644 --- a/collectors/python.d.plugin/samba/samba.chart.py +++ b/collectors/python.d.plugin/samba/samba.chart.py @@ -24,10 +24,7 @@ from bases.FrameworkServices.ExecutableService import ExecutableService disabled_by_default = True -# default module values (can be overridden per job in `config`) update_every = 5 -priority = 60000 -retries = 60 ORDER = [ 'syscall_rw', @@ -41,14 +38,14 @@ ORDER = [ CHARTS = { 'syscall_rw': { - 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'], + 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'], 'lines': [ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024], ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024] ] }, 'smb2_rw': { - 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'], + 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'], 'lines': [ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024], ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024], diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf index ee513c60f..db15d4e9e 100644 --- a/collectors/python.d.plugin/samba/samba.conf +++ b/collectors/python.d.plugin/samba/samba.conf @@ -27,11 +27,9 @@ update_every: 5 # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
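The unit relabels in this patch ('kilobytes/s' → 'KiB/s', 'KB' → 'KiB', 'MB' → 'MiB') align the displayed unit with the power-of-two divisors the charts were already using; SI-named units keep decimal divisors instead, as in the redis kilobits/s change. A sketch of the pairing (dimension ids illustrative):

```python
KiB = 1 << 10  # 1024 bytes
MiB = 1 << 20  # 1048576 bytes

# byte-valued dimensions paired with binary divisors:
kib_rate_line = ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, KiB]  # -> KiB/s
mib_gauge_line = ['memory_rss', 'rss', 'absolute', 1, MiB]                     # -> MiB
```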
@@ -58,5 +56,5 @@ update_every: 5 # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md index eb1642d90..e3f956f11 100644 --- a/collectors/python.d.plugin/sensors/README.md +++ b/collectors/python.d.plugin/sensors/README.md @@ -15,3 +15,5 @@ We are tracking such cases in issue [#827](https://github.com/netdata/netdata/is Please join this discussion for help. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py index d70af3b05..e622eb8e6 100644 --- a/collectors/python.d.plugin/sensors/sensors.chart.py +++ b/collectors/python.d.plugin/sensors/sensors.chart.py @@ -7,8 +7,6 @@ from third_party import lm_sensors as sensors from bases.FrameworkServices.SimpleService import SimpleService -# default module values (can be overridden per job in `config`) -# update_every = 2 ORDER = [ 'temperature', @@ -139,7 +137,7 @@ class Service(SimpleService): except sensors.SensorsError as error: self.error('{0}: {1}'.format(sf.name, error)) continue - if not vals or vals[0] == 0: + if not vals or (vals[0] == 0 and feature.type != 1): continue if TYPE_MAP[feature.type] == sensor: # create chart diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf index 83bbffd7d..d3369ba66 100644 --- a/collectors/python.d.plugin/sensors/sensors.conf +++ b/collectors/python.d.plugin/sensors/sensors.conf @@ -19,11 +19,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md index a31ad0c7a..3b0816fb8 100644 --- a/collectors/python.d.plugin/smartd_log/README.md +++ b/collectors/python.d.plugin/smartd_log/README.md @@ -99,3 +99,5 @@ local: If no configuration is given, module will attempt to read log files in `/var/log/smartd/` directory. 
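The smartd_log hunk that follows moves the `.csv` extension check to the top of `create_disk_from_file()`, so non-log files are rejected before any name parsing. A standalone sketch of the same directory scan (helper name hypothetical):

```python
import os

SMARTD_LOG_DIR = '/var/log/smartd/'  # the default path mentioned above


def find_disk_logs(path=SMARTD_LOG_DIR):
    """Yield readable smartd attribute logs, skipping anything that is
    not a .csv file -- mirroring the reordered checks in the patch."""
    for name in sorted(os.listdir(path)):
        if not name.endswith('.csv'):
            continue  # smartd may write other files into this directory
        full = os.path.join(path, name)
        if os.access(full, os.R_OK):
            yield full
```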
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py index 13762fabe..871025a47 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -268,7 +268,7 @@ CHARTS = { 'algo': INCREMENTAL, }, 'reserved_block_count': { - 'options': [None, 'Reserved Block Count', '%', 'wear', 'smartd_log.reserved_block_count', 'line'], + 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'], 'lines': [], 'attrs': [ATTR170], 'algo': ABSOLUTE, @@ -321,7 +321,7 @@ CHARTS = { }, 'percent_lifetime_used': { - 'options': [None, 'Percent Lifetime Used', '%', 'wear', 'smartd_log.percent_lifetime_used', 'line'], + 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'], 'lines': [], 'attrs': [ATTR202], 'algo': ABSOLUTE, @@ -453,6 +453,11 @@ class Ata190(BaseAtaSmartAttribute): return 100 - int(self.normalized_value) +class Ata194(BaseAtaSmartAttribute): + def value(self): + return min(int(self.normalized_value), int(self.raw_value)) + + class BaseSCSISmartAttribute: def __init__(self, name, raw_value): self.name = name @@ -474,10 +479,11 @@ def ata_attribute_factory(value): return Ata9(*value) elif name == ATTR190: return Ata190(*value) + elif name == ATTR194: + return Ata194(*value) elif name in [ ATTR1, ATTR7, - ATTR194, ATTR202, ATTR206, ]: @@ -580,11 +586,9 @@ class Service(SimpleService): SimpleService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = deepcopy(CHARTS) - self.log_path = configuration.get('log_path', DEF_PATH) self.age = configuration.get('age', DEF_AGE) self.exclude = configuration.get('exclude_disks', str()).split() - self.disks = list() self.runs = 0 @@ -646,6 +650,10 @@ class Service(SimpleService): return len(self.disks) def create_disk_from_file(self, full_name, current_time): + if not full_name.endswith(CSV): + self.debug('skipping {0}: not a csv file'.format(full_name)) + return None + name = os.path.basename(full_name).split('.')[-3] path = os.path.join(self.log_path, full_name) @@ -655,10 +663,6 @@ class Service(SimpleService): if [p for p in self.exclude if p in name]: return None - if not full_name.endswith(CSV): - self.debug('skipping {0}: not a csv file'.format(full_name)) - return None - if not os.access(path, os.R_OK): self.debug('skipping {0}: not readable'.format(full_name)) return None diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf index ab7f45b0f..4f138d17a 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.conf +++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. 
-# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, smartd_log also supports the following: diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md index ae5602587..c38930558 100644 --- a/collectors/python.d.plugin/spigotmc/README.md +++ b/collectors/python.d.plugin/spigotmc/README.md @@ -20,3 +20,5 @@ password: pass By default, a connection to port 25575 on the local system is attempted with an empty password. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py index a5e5ee0ee..09674f5c9 100644 --- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py +++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py @@ -16,7 +16,10 @@ update_every = 5 PRECISION = 100 -ORDER = ['tps', 'users'] +ORDER = [ + 'tps', + 'users', +] CHARTS = { 'tps': { diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf index 3ba492def..ccb5e2636 100644 --- a/collectors/python.d.plugin/spigotmc/spigotmc.conf +++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # In addition to the above, spigotmc supports the following: diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md index a1817cc2b..b5b776dd0 100644 --- a/collectors/python.d.plugin/springboot/README.md +++ b/collectors/python.d.plugin/springboot/README.md @@ -120,3 +120,5 @@ You can disable the default charts by set `defaults.: false`. The dimension name of extras charts should replace `.` to `_`. 
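Returning to the smartd_log change above: the new `Ata194` handler covers SMART attribute 194 (drive temperature). The patch does not say why it takes the minimum, but a plausible reading is that firmwares disagree about which field carries the Celsius value. A self-contained restatement (the base class here is a minimal stand-in for the module's own):

```python
class BaseAtaSmartAttribute:  # minimal stand-in for the module's base class
    def __init__(self, name, normalized_value, raw_value):
        self.name = name
        self.normalized_value = normalized_value
        self.raw_value = raw_value


class Ata194(BaseAtaSmartAttribute):  # SMART id 194: drive temperature
    def value(self):
        # Some drives report Celsius in the raw field, others in the
        # normalized field (an inference; the patch does not say); the
        # smaller of the two is the plausible temperature either way.
        return min(int(self.normalized_value), int(self.raw_value))
```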
Please check [springboot.conf](springboot.conf) for more examples. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py index 7df37e1d0..eec870ebf 100644 --- a/collectors/python.d.plugin/springboot/springboot.chart.py +++ b/collectors/python.d.plugin/springboot/springboot.chart.py @@ -6,13 +6,14 @@ import json from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap'] +DEFAULT_ORDER = [ + 'response_code', + 'threads', + 'gc_time', + 'gc_ope', + 'heap', +] DEFAULT_CHARTS = { 'response_code': { @@ -60,7 +61,7 @@ DEFAULT_CHARTS = { ] }, 'heap': { - 'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"], + 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"], 'lines': [ ["heap_committed", 'committed', "absolute"], ["heap_used", 'used', "absolute"], diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf index 40b5fb437..13a398955 100644 --- a/collectors/python.d.plugin/springboot/springboot.conf +++ b/collectors/python.d.plugin/springboot/springboot.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
@@ -53,7 +51,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, this plugin also supports the following: diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md index 9c9b62f27..b278f4191 100644 --- a/collectors/python.d.plugin/squid/README.md +++ b/collectors/python.d.plugin/squid/README.md @@ -36,3 +36,5 @@ local: Without any configuration module will try to autodetect where squid presents its `counters` data --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py index fd54168f0..c00556b56 100644 --- a/collectors/python.d.plugin/squid/squid.chart.py +++ b/collectors/python.d.plugin/squid/squid.chart.py @@ -6,13 +6,12 @@ from bases.FrameworkServices.SocketService import SocketService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 - -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests'] +ORDER = [ + 'clients_net', + 'clients_requests', + 'servers_net', + 'servers_requests', +] CHARTS = { 'clients_net': { diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf index 564187f00..b90a52c0c 100644 --- a/collectors/python.d.plugin/squid/squid.conf +++ b/collectors/python.d.plugin/squid/squid.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, squid also supports the following: diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md index e548bd338..21e3896a3 100644 --- a/collectors/python.d.plugin/tomcat/README.md +++ b/collectors/python.d.plugin/tomcat/README.md @@ -31,3 +31,5 @@ Without configuration, module attempts to connect to `http://localhost:8080/mana So it will probably fail. 
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py index 3c2d0ed40..01578c56e 100644 --- a/collectors/python.d.plugin/tomcat/tomcat.chart.py +++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py @@ -8,13 +8,18 @@ import xml.etree.ElementTree as ET from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 +MiB = 1 << 20 -# charts order (can be overridden if you want less charts, or different order) -ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured'] +ORDER = [ + 'accesses', + 'bandwidth', + 'processing_time', + 'threads', + 'jvm', + 'jvm_eden', + 'jvm_survivor', + 'jvm_tenured', +] CHARTS = { 'accesses': { @@ -25,7 +30,7 @@ CHARTS = { ] }, 'bandwidth': { - 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'], + 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'], 'lines': [ ['bytesSent', 'sent', 'incremental', 1, 1024], ['bytesReceived', 'received', 'incremental', 1, 1024], @@ -45,39 +50,39 @@ CHARTS = { ] }, 'jvm': { - 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'], + 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'], 'lines': [ - ['free', 'free', 'absolute', 1, 1048576], - ['eden_used', 'eden', 'absolute', 1, 1048576], - ['survivor_used', 'survivor', 'absolute', 1, 1048576], - ['tenured_used', 'tenured', 'absolute', 1, 1048576], - ['code_cache_used', 'code cache', 'absolute', 1, 1048576], - ['compressed_used', 'compressed', 'absolute', 1, 1048576], - ['metaspace_used', 'metaspace', 'absolute', 1, 1048576], + ['free', 'free', 'absolute', 1, MiB], + ['eden_used', 'eden', 'absolute', 1, MiB], + ['survivor_used', 'survivor', 'absolute', 1, MiB], + ['tenured_used', 'tenured', 'absolute', 1, MiB], + ['code_cache_used', 'code cache', 'absolute', 1, MiB], + ['compressed_used', 'compressed', 'absolute', 1, MiB], + ['metaspace_used', 'metaspace', 'absolute', 1, MiB], ] }, 'jvm_eden': { - 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'], + 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'], 'lines': [ - ['eden_used', 'used', 'absolute', 1, 1048576], - ['eden_committed', 'committed', 'absolute', 1, 1048576], - ['eden_max', 'max', 'absolute', 1, 1048576] + ['eden_used', 'used', 'absolute', 1, MiB], + ['eden_committed', 'committed', 'absolute', 1, MiB], + ['eden_max', 'max', 'absolute', 1, MiB] ] }, 'jvm_survivor': { - 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'], + 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'], 'lines': [ - ['survivor_used', 'used', 'absolute', 1, 1048576], - ['survivor_committed', 'committed', 'absolute', 1, 1048576], - ['survivor_max', 'max', 'absolute', 1, 1048576] + ['survivor_used', 'used', 'absolute', 1, MiB], + ['survivor_committed', 'committed', 'absolute', 1, MiB], + ['survivor_max', 'max', 'absolute', 1, MiB], ] }, 'jvm_tenured': { - 'options': [None, 
'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'], + 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'], 'lines': [ - ['tenured_used', 'used', 'absolute', 1, 1048576], - ['tenured_committed', 'committed', 'absolute', 1, 1048576], - ['tenured_max', 'max', 'absolute', 1, 1048576] + ['tenured_used', 'used', 'absolute', 1, MiB], + ['tenured_committed', 'committed', 'absolute', 1, MiB], + ['tenured_max', 'max', 'absolute', 1, MiB] ] } } @@ -86,10 +91,10 @@ CHARTS = { class Service(UrlService): def __init__(self, configuration=None, name=None): UrlService.__init__(self, configuration=configuration, name=name) - self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true') - self.connector_name = self.configuration.get('connector_name', None) self.order = ORDER self.definitions = CHARTS + self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true') + self.connector_name = self.configuration.get('connector_name', None) def _get_data(self): """ diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf index c63f06cfa..009591bdf 100644 --- a/collectors/python.d.plugin/tomcat/tomcat.conf +++ b/collectors/python.d.plugin/tomcat/tomcat.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, tomcat also supports the following: diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md index 4a8833730..2ce0f25f3 100644 --- a/collectors/python.d.plugin/tor/README.md +++ b/collectors/python.d.plugin/tor/README.md @@ -44,3 +44,5 @@ For more options please read the manual. Without configuration, module attempts to connect to `127.0.0.1:9051`. 
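In the tor hunk below, `use_socket` is rebuilt from the configured `control_port`: a non-numeric string that differs from the default is treated as a UNIX socket path rather than a TCP port. A standalone restatement (the default value is assumed from the README's `127.0.0.1:9051`; tor.chart.py defines its own `DEF_PORT`):

```python
DEF_PORT = 9051  # assumed default, per the README above


def uses_unix_socket(port):
    """True when the configured control_port looks like a socket path."""
    return isinstance(port, str) and port != DEF_PORT and not port.isdigit()


# uses_unix_socket('/run/tor/control') -> True; uses_unix_socket('9051') -> False
```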
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py index b77632bd4..dd61e6e9e 100644 --- a/collectors/python.d.plugin/tor/tor.chart.py +++ b/collectors/python.d.plugin/tor/tor.chart.py @@ -24,7 +24,7 @@ ORDER = [ CHARTS = { 'traffic': { - 'options': [None, 'Tor Traffic', 'KB/s', 'traffic', 'tor.traffic', 'area'], + 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'], 'lines': [ ['read', 'read', 'incremental', 1, 1024], ['write', 'write', 'incremental', 1, -1024], @@ -39,10 +39,8 @@ class Service(SimpleService): super(Service, self).__init__(configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - self.port = self.configuration.get('control_port', DEF_PORT) self.password = self.configuration.get('password') - self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit() self.conn = None self.alive = False diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf index 8245414fb..91b517a62 100644 --- a/collectors/python.d.plugin/tor/tor.conf +++ b/collectors/python.d.plugin/tor/tor.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 10 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, tor plugin also supports the following: diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md index 9b4a18208..61e0fdb72 100644 --- a/collectors/python.d.plugin/traefik/README.md +++ b/collectors/python.d.plugin/traefik/README.md @@ -46,9 +46,10 @@ priority : 60000 local: url : 'http://localhost:8080/health' - retries : 10 ``` Without configuration, module attempts to connect to `http://localhost:8080/health`. 
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py index dc8933220..570339d0a 100644 --- a/collectors/python.d.plugin/traefik/traefik.chart.py +++ b/collectors/python.d.plugin/traefik/traefik.chart.py @@ -3,16 +3,13 @@ # Author: Alexandre Menezes (@ale_menezes) # SPDX-License-Identifier: GPL-3.0-or-later -from json import loads from collections import defaultdict + +from json import loads + from bases.FrameworkServices.UrlService import UrlService -# default module values (can be overridden per job in `config`) -update_every = 1 -priority = 60000 -retries = 10 -# charts order (can be overridden if you want less charts, or different order) ORDER = [ 'response_statuses', 'response_codes', @@ -99,14 +96,22 @@ class Service(UrlService): self.url = self.configuration.get('url', 'http://localhost:8080/health') self.order = ORDER self.definitions = CHARTS - self.data = { - 'successful_requests': 0, 'redirects': 0, 'bad_requests': 0, - 'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0, - '3xx': 0, '4xx': 0, '5xx': 0, 'other': 0, - 'average_response_time_per_iteration_sec': 0 - } self.last_total_response_time = 0 self.last_total_count = 0 + self.data = { + 'successful_requests': 0, + 'redirects': 0, + 'bad_requests': 0, + 'server_errors': 0, + 'other_requests': 0, + '1xx': 0, + '2xx': 0, + '3xx': 0, + '4xx': 0, + '5xx': 0, + 'other': 0, + 'average_response_time_per_iteration_sec': 0, + } def _get_data(self): data = self._get_raw_data() diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf index 909b9e549..e3f182d32 100644 --- a/collectors/python.d.plugin/traefik/traefik.conf +++ b/collectors/python.d.plugin/traefik/traefik.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 10 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, traefik plugin also supports the following: diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md index 3b4fa16fd..e213683ca 100644 --- a/collectors/python.d.plugin/unbound/README.md +++ b/collectors/python.d.plugin/unbound/README.md @@ -74,3 +74,5 @@ While it's a bit more complicated to set up correctly, it is recommended that you use a UNIX socket as it provides far better performance. 
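The reformatted `self.data` dict in the traefik hunk above enumerates the response buckets the module maintains (`1xx` through `5xx` plus `other`). A sketch of the bucketing rule those keys imply (helper name hypothetical):

```python
def status_bucket(code):
    """Map an HTTP status code onto the dashboard buckets."""
    if 100 <= code < 600:
        return '{0}xx'.format(code // 100)
    return 'other'  # malformed or out-of-range codes
```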
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py index 52fcbf7e2..adb58d417 100644 --- a/collectors/python.d.plugin/unbound/unbound.chart.py +++ b/collectors/python.d.plugin/unbound/unbound.chart.py @@ -13,7 +13,11 @@ from bases.loaders import YamlOrderedLoader PRECISION = 1000 -ORDER = ['queries', 'recursion', 'reqlist'] +ORDER = [ + 'queries', + 'recursion', + 'reqlist', +] CHARTS = { 'queries': { diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf index 46c4b097f..68561366b 100644 --- a/collectors/python.d.plugin/unbound/unbound.conf +++ b/collectors/python.d.plugin/unbound/unbound.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_everye -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, unbound also supports the following: diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md index a062710df..9d455cfca 100644 --- a/collectors/python.d.plugin/uwsgi/README.md +++ b/collectors/python.d.plugin/uwsgi/README.md @@ -35,3 +35,5 @@ localhost: ``` When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:1717`. 
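For the uwsgi module that follows, the README default above is `localhost:1717`, the address of the uwsgi stats server. As a rough sketch of what is on the wire (the module itself goes through `SocketService`), the stats server pushes one JSON document and closes the connection:

```python
import json
import socket


def read_uwsgi_stats(host='localhost', port=1717):
    """Fetch the uwsgi stats-server JSON blob over TCP (sketch only)."""
    sock = socket.create_connection((host, port), timeout=2)
    chunks = []
    try:
        while True:  # read until the server closes the connection
            data = sock.recv(4096)
            if not data:
                break
            chunks.append(data)
    finally:
        sock.close()
    return json.loads(b''.join(chunks).decode('utf-8'))
```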
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py index 5ebcfb55b..511b770cf 100644 --- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py +++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py @@ -7,10 +7,6 @@ import json from copy import deepcopy from bases.FrameworkServices.SocketService import SocketService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 ORDER = [ 'requests', @@ -40,27 +36,27 @@ CHARTS = { ] }, 'tx': { - 'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'], + 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'], 'lines': [ ['tx', 'tx', 'incremental'] ] }, 'avg_rt': { - 'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'], + 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'], 'lines': [ ['avg_rt', 'avg_rt', 'absolute'] ] }, 'memory_rss': { - 'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'], + 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'], 'lines': [ - ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024] + ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20] ] }, 'memory_vsz': { - 'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'], + 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'], 'lines': [ - ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024] + ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20] ] }, 'exceptions': { @@ -87,15 +83,13 @@ CHARTS = { class Service(SocketService): def __init__(self, configuration=None, name=None): super(Service, self).__init__(configuration=configuration, name=name) - self.url = self.configuration.get('host', 'localhost') - self.port = self.configuration.get('port', 1717) self.order = ORDER self.definitions = deepcopy(CHARTS) - + self.url = self.configuration.get('host', 'localhost') + self.port = self.configuration.get('port', 1717) # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time for chart in DYNAMIC_CHARTS: self.definitions[chart]['lines'] = [] - self.last_result = {} self.workers = [] diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf index be1c2ada3..7d09e7330 100644 --- a/collectors/python.d.plugin/uwsgi/uwsgi.conf +++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. 
# The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, uwsgi also supports the following: diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md index 96c7cafaa..44d64efe1 100644 --- a/collectors/python.d.plugin/varnish/README.md +++ b/collectors/python.d.plugin/varnish/README.md @@ -64,6 +64,14 @@ It produces: ### configuration -No configuration is needed. +Only one parameter is supported: + +```yaml +instance_name: 'name' +``` + +The name of the varnishd instance to get logs from. If not specified, the host name is used. --- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py index d889c2b33..da6781576 100644 --- a/collectors/python.d.plugin/varnish/varnish.chart.py +++ b/collectors/python.d.plugin/varnish/varnish.chart.py @@ -8,10 +8,6 @@ import re from bases.collection import find_binary from bases.FrameworkServices.ExecutableService import ExecutableService -# default module values (can be overridden per job in `config`) -# update_every = 2 -priority = 60000 -retries = 60 ORDER = [ 'session_connections', @@ -47,7 +43,7 @@ CHARTS = { ] }, 'all_time_hit_rate': { - 'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance', + 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance', 'varnish.all_time_hit_rate', 'stacked'], 'lines': [ ['cache_hit', 'hit', 'percentage-of-absolute-row'], @@ -55,7 +51,7 @@ CHARTS = { ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']] }, 'current_poll_hit_rate': { - 'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance', + 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance', 'varnish.current_poll_hit_rate', 'stacked'], 'lines': [ ['cache_hit', 'hit', 'percentage-of-incremental-row'], @@ -127,7 +123,7 @@ CHARTS = { ] }, 'memory_usage': { - 'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'], + 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'], 'lines': [ ['memory_free', 'free', 'absolute', 1, 1 << 20], ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]] @@ -140,6 +136,8 @@ CHARTS = { } } +VARNISHSTAT = 'varnishstat' + class Parser: _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)') @@ -176,19 +174,31 @@ class Service(ExecutableService): ExecutableService.__init__(self, configuration=configuration, name=name) self.order = ORDER self.definitions = CHARTS - varnishstat = find_binary('varnishstat') - self.command = [varnishstat, '-1'] if varnishstat else None + self.instance_name = configuration.get('instance_name') self.parser = Parser() + self.command = None + + def create_command(self): + varnishstat = find_binary(VARNISHSTAT) + + if not varnishstat: + self.error("can't locate '{0}' 
binary or binary is not executable by user netdata".format(VARNISHSTAT)) + return False + + if self.instance_name: + self.command = [varnishstat, '-1', '-n', self.instance_name, '-t', '1'] + else: + self.command = [varnishstat, '-1', '-t', '1'] + return True def check(self): - if not self.command: - self.error("Can't locate 'varnishstat' binary or binary is not executable by user netdata") + if not self.create_command(): return False # STDOUT is not empty reply = self._get_raw_data() if not reply: - self.error("No output from 'varnishstat'. Not enough privileges?") + self.error("No output from 'varnishstat'. Is it running? Not enough privileges?") return False self.parser.init(reply) diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf index 4b069d514..54bfe4dee 100644 --- a/collectors/python.d.plugin/varnish/varnish.conf +++ b/collectors/python.d.plugin/varnish/varnish.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,11 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # +# Additionally to the above, varnish also supports the following: +# +# instance_name: 'name' # the name of the varnishd instance to get logs from. If not specified, the host name is used. +# # ---------------------------------------------------------------------- diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md index b18f08351..94717c812 100644 --- a/collectors/python.d.plugin/w1sensor/README.md +++ b/collectors/python.d.plugin/w1sensor/README.md @@ -11,3 +11,5 @@ Charts are created dynamically based on the number of detected sensors. For detailed configuration information please read [`w1sensor.conf`](w1sensor.conf) file. 
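The varnish hunk above defers command construction to `create_command()` so the new `instance_name` option can be honoured. A standalone restatement of the flag layout it produces (`shutil.which` stands in for the module's `find_binary()`):

```python
from shutil import which  # stand-in for bases.collection.find_binary


def varnishstat_command(instance_name=None):
    """Build the varnishstat invocation used above: '-1' dumps the
    counters once, '-t 1' caps the wait, '-n' selects an instance."""
    binary = which('varnishstat')
    if not binary:
        return None  # the module logs an error and fails check() here
    cmd = [binary, '-1']
    if instance_name:
        cmd += ['-n', instance_name]
    return cmd + ['-t', '1']
```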
--- + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py index 493c4a135..e50312fc5 100644 --- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py +++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py @@ -16,7 +16,9 @@ W1_DIR = '/sys/bus/w1/devices/' # Lines matching the following regular expression contain a temperature value RE_TEMP = re.compile(r' t=(\d+)') -ORDER = ['temp'] +ORDER = [ + 'temp', +] CHARTS = { 'temp': { diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf index a4aed8dd7..17271001b 100644 --- a/collectors/python.d.plugin/w1sensor/w1sensor.conf +++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. @@ -58,7 +56,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 5 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, example also supports the following: diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md index e25a03fb3..176551cf4 100644 --- a/collectors/python.d.plugin/web_log/README.md +++ b/collectors/python.d.plugin/web_log/README.md @@ -21,7 +21,7 @@ netdata turns this "useless" log file, into a powerful performance and health mo If netdata is installed on a system running a web server, it will detect it and it will automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*): ![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png) -*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stab_status`).* +*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).* > [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config files for each web server. There is a directory with a config file for each web server under [`/etc/netdata/python.d/`](../). 
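The w1sensor hunk above keeps the same `RE_TEMP` pattern; for context, the kernel's w1 therm driver exposes readings as `t=<millidegrees>` in each sensor's `w1_slave` file. A small parsing sketch:

```python
import re

RE_TEMP = re.compile(r' t=(\d+)')  # same pattern as w1sensor.chart.py


def parse_w1_slave(text):
    """Extract a temperature in Celsius from a w1_slave dump,
    e.g. '... t=23125' -> 23.125."""
    match = RE_TEMP.search(text)
    return int(match.group(1)) / 1000.0 if match else None
```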
@@ -199,3 +199,5 @@ The column `minimum requests` state the minimum number of requests required for [**netdata**](https://my-netdata.io/) alarms are user configurable. Sample config files can be found under directory `health/health.d` of the netdata github repository. So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py index 20e15f4cb..992790462 100644 --- a/collectors/python.d.plugin/web_log/web_log.chart.py +++ b/collectors/python.d.plugin/web_log/web_log.chart.py @@ -25,7 +25,9 @@ from bases.collection import read_last_line from bases.FrameworkServices.LogService import LogService -ORDER_APACHE_CACHE = ['apache_cache'] +ORDER_APACHE_CACHE = [ + 'apache_cache', +] ORDER_WEB = [ 'response_statuses', @@ -182,7 +184,7 @@ CHARTS_WEB = { CHARTS_APACHE_CACHE = { 'apache_cache': { - 'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache', + 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache', 'stacked'], 'lines': [ ['hit', 'cache', 'percentage-of-absolute-row'], diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf index a67957aef..0ac17f665 100644 --- a/collectors/python.d.plugin/web_log/web_log.conf +++ b/collectors/python.d.plugin/web_log/web_log.conf @@ -27,11 +27,9 @@ # If unset, the default for python.d.plugin is used. # priority: 60000 -# retries sets the number of retries to be made in case of failures. -# If unset, the default for python.d.plugin is used. -# Attempts to restore the service are made once every update_every -# and only if the module has collected values in the past. -# retries: 60 +# penalty indicates whether to apply penalty to update_every in case of failures. +# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. +# penalty: yes # autodetection_retry sets the job re-check interval in seconds. # The job is not deleted if check fails. 
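Both the web_log `apache_cache` chart above and the varnish hit-rate charts use the `percentage-of-absolute-row` algorithm. The patch does not define it, but the name suggests each dimension is rendered as its share of the row's total at each point; a sketch under that assumption:

```python
def percentage_of_absolute_row(row):
    """Scale a row of dimension values so they sum to 100 (sketch)."""
    total = sum(abs(value) for value in row.values())
    if not total:
        return {key: 0.0 for key in row}
    return {key: 100.0 * abs(value) / total for key, value in row.items()}
```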
@@ -61,7 +59,7 @@ # # JOBs sharing a name are mutually exclusive # update_every: 1 # the JOB's data collection frequency # priority: 60000 # the JOB's order on the dashboard -# retries: 60 # the JOB's number of restoration attempts +# penalty: yes # the JOB's penalty # autodetection_retry: 0 # the JOB's re-check interval in seconds # # Additionally to the above, web_log also supports the following: diff --git a/collectors/statsd.plugin/.keep b/collectors/statsd.plugin/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am index 7f09bacd7..e63bf98b4 100644 --- a/collectors/statsd.plugin/Makefile.am +++ b/collectors/statsd.plugin/Makefile.am @@ -9,12 +9,11 @@ dist_noinst_DATA = \ statsdconfigdir=$(libconfigdir)/statsd.d dist_statsdconfig_DATA = \ - $(top_srcdir)/installer/.keep \ example.conf \ $(NULL) userstatsdconfigdir=$(configdir)/statsd.d dist_userstatsdconfig_DATA = \ - $(top_srcdir)/installer/.keep \ + .keep \ $(NULL) diff --git a/collectors/statsd.plugin/Makefile.in b/collectors/statsd.plugin/Makefile.in deleted file mode 100644 index 5c16a86d1..000000000 --- a/collectors/statsd.plugin/Makefile.in +++ /dev/null @@ -1,556 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = collectors/statsd.plugin -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \ - $(dist_userstatsdconfig_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` 
-am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(statsdconfigdir)" \ - "$(DESTDIR)$(userstatsdconfigdir)" -DATA = $(dist_noinst_DATA) $(dist_statsdconfig_DATA) \ - $(dist_userstatsdconfig_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ 
-ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -statsdconfigdir = $(libconfigdir)/statsd.d -dist_statsdconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - example.conf \ - $(NULL) - -userstatsdconfigdir = $(configdir)/statsd.d -dist_userstatsdconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/statsd.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_statsdconfigDATA: $(dist_statsdconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(statsdconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(statsdconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(statsdconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(statsdconfigdir)" || exit $$?; \ - done - -uninstall-dist_statsdconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_statsdconfig_DATA)'; test -n "$(statsdconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(statsdconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_userstatsdconfigDATA: $(dist_userstatsdconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(userstatsdconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(userstatsdconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userstatsdconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(userstatsdconfigdir)" || exit $$?; \ - done - -uninstall-dist_userstatsdconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_userstatsdconfig_DATA)'; test -n "$(userstatsdconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(userstatsdconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(statsdconfigdir)" "$(DESTDIR)$(userstatsdconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_statsdconfigDATA \ - install-dist_userstatsdconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_statsdconfigDATA \ - uninstall-dist_userstatsdconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_statsdconfigDATA \ - install-dist_userstatsdconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_statsdconfigDATA \ - uninstall-dist_userstatsdconfigDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md index 6ef038343..399918dc9 100644 --- a/collectors/statsd.plugin/README.md +++ b/collectors/statsd.plugin/README.md @@ -1,22 +1,20 @@ -# Netdata Statsd +# statsd.plugin statsd is a system to collect data from any application. Applications are sending metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases. There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics to any application framework. This makes statsd quite popular for custom application metrics. -## netdata statsd - netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases. -netdata statsd is inside netdata (an internal plugin, running inside the netdata daemon), it is configured via `netdata.conf` and by-default listens on standard statsd ports (tcp and udp 8125 - yes, netdata statsd server supports both tcp and udp at the same time). +Netdata statsd is inside Netdata (an internal plugin, running inside the Netdata daemon); it is configured via `netdata.conf` and by default listens on standard statsd ports (tcp and udp 8125 - yes, Netdata statsd server supports both tcp and udp at the same time). -Since statsd is embedded in netdata, it means you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation. +Since statsd is embedded in Netdata, you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation. -netdata statsd is fast. It can collect more than **1.200.000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded, one thread collects metrics, another one updates the charts from the collected data). +Netdata statsd is fast. It can collect more than **1.200.000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded, one thread collects metrics, another one updates the charts from the collected data). -## metrics supported by netdata +## Metrics supported by Netdata -netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too. +Netdata fully supports the statsd protocol. All statsd client libraries can be used with Netdata too. - **Gauges** @@ -521,3 +519,5 @@ statsd "metric1:10|g" "metric2:10|c" ... ``` The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
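For reference, the batching function described above can be as small as the following sketch. This is illustrative, not the exact helper from the Netdata documentation: the `nc` flags vary between netcat implementations, and the 1000-byte cutoff is just the UDP payload budget mentioned in the README.

```sh
#!/usr/bin/env bash
# send all given metrics to a statsd server with a single nc call
# (assumes a netcat that accepts -u for UDP and -w for a timeout)
STATSD_HOST="localhost"
STATSD_PORT="8125"

statsd() {
    local all="${*}" options="-u"        # UDP by default
    [ "${#all}" -gt 1000 ] && options="" # large payloads go over TCP instead
    # one metric per line, one nc invocation for everything
    printf '%s\n' "${@}" | nc ${options} -w 1 "${STATSD_HOST}" "${STATSD_PORT}"
}

statsd "metric1:10|g" "metric2:10|c"
```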
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fstatsd.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index c92bfd1c2..534466a04 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -2383,7 +2383,7 @@ void *statsd_main(void *ptr) { , "statsd" , NULL , "statsd server TCP connected sockets" - , "connected" + , "sockets" , PLUGIN_STATSD_NAME , "stats" , 132016 diff --git a/collectors/tc.plugin/Makefile.in b/collectors/tc.plugin/Makefile.in deleted file mode 100644 index d336e1f0d..000000000 --- a/collectors/tc.plugin/Makefile.in +++ /dev/null @@ -1,562 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am 
$(dist_plugins_SCRIPTS) \ - $(dist_noinst_DATA) -subdir = collectors/tc.plugin -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = 
@am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - tc-qos-helper.sh \ - $(NULL) - -SUFFIXES = .in -dist_plugins_SCRIPTS = \ - tc-qos-helper.sh \ - $(NULL) - -dist_noinst_DATA = \ - tc-qos-helper.sh.in \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu collectors/tc.plugin/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_pluginsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_pluginsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_pluginsSCRIPTS install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-dist_pluginsSCRIPTS - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell 
versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md index a8b151de3..4dc1a1d22 100644 --- a/collectors/tc.plugin/README.md +++ b/collectors/tc.plugin/README.md @@ -1,4 +1,4 @@ -## tc.plugin +# tc.plugin Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** ! @@ -6,15 +6,11 @@ Live demo - **[see it in action here](https://registry.my-netdata.io/#menu_tc)** Netdata monitors `tc` QoS classes for all interfaces. -If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/) it will collect -interface and class names. +If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/) it will collect interface and class names. -There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin -in `C` code - this shell script is just a configuration for the command to run to get `tc` output). +There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output). -The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state -machine was needed to keep track of all the `tc` classes, including the pseudo classes tc -dynamically creates. +The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates. ## Motivation @@ -80,11 +76,16 @@ Once **traffic classification** is applied, we can use **[netdata](https://githu QoS, is extremely light. You will configure it once, and this is it. It will not bother you again and it will not use any noticeable CPU resources, especially on application and database servers. ---- +This is QoS from a home Linux router. Check these features: + +1. It is real-time (per second updates) +2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it. -## QoS in Linux? Have you lost your mind? +![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif) + +--- -Yes I know... but no, I have not! +## QoS in Linux? Of course, `tc` is probably **the most undocumented, complicated and unfriendly** command in Linux. @@ -108,17 +109,13 @@ For example, do you know that for matching a simple port range in `tc`, e.g. all 32768/0x8000 ``` -I know what you are thinking right now! **And I agree!** +To do it the hard way, you can go through the [tc configuration steps](#qos-configuration-with-tc). An easier way is to use **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool that simplifies QoS management in Linux. -This is why I wrote **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, a tool to simplify QoS management in Linux. +## QoS Configuration with FireHOL The **[FireHOL](https://firehol.org/)** package already distributes **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**. Check the **[FireQOS tutorial](https://firehol.org/tutorial/fireqos-new-user/)** to learn how to write your own QoS configuration. -With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution.
If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following: - -## QoS Configuration - -This is the file `/etc/firehol/fireqos.conf` we use at the netdata demo site: +With **[FireQOS](https://firehol.org/tutorial/fireqos-new-user/)**, it is **really simple for everyone to use QoS in Linux**. Just install the package `firehol`. It should already be available for your distribution. If not, check the **[FireHOL Installation Guide](https://firehol.org/installing/)**. After that, you will have the `fireqos` command which uses a configuration like the following `/etc/firehol/fireqos.conf`, used at the netdata demo site: ```sh # configure the netdata ports @@ -166,15 +163,33 @@ And this is what you are going to get: ![image](https://cloud.githubusercontent.com/assets/2662304/14436322/c91d90a4-0024-11e6-9fb1-57cdef1580df.png) ---- - -## More examples: +## QoS Configuration with tc + +First, set up the tc rules in rc.local using commands to assign different DSCP markings to different classids. You can see one such example in [GitHub issue #4563](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973). + +Then, map the classids to names by creating `/etc/iproute2/tc_cls`. For example: +```2:1 Standard +2:8 LowPriorityData +2:10 HighThroughputData +2:16 OAM +2:18 LowLatencyData +2:24 BroadcastVideo +2:26 MultimediaStreaming +2:32 RealTimeInteractive +2:34 MultimediaConferencing +2:40 Signalling +2:46 Telephony +2:48 NetworkControl ``` -This is QoS from my home linux router. Check these features: +Add the following configuration option in `/etc/netdata.conf`: +```[plugin:tc] + enable show all classes and qdiscs for all interfaces = yes ``` -1. It is real-time (per second updates) -2. QoS really works in Linux - check that the `background` traffic is squeezed when `surfing` needs it. +Finally, create `/etc/netdata/tc-qos-helper.conf` with this content: +```tc_show="class"``` -![test2](https://cloud.githubusercontent.com/assets/2662304/14093004/68966020-f553-11e5-98fe-ffee2086fafd.gif) +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Ftc.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/collectors/tc.plugin/tc-qos-helper.sh b/collectors/tc.plugin/tc-qos-helper.sh deleted file mode 100644 index a1a2b9145..000000000 --- a/collectors/tc.plugin/tc-qos-helper.sh +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis -# SPDX-License-Identifier: GPL-3.0-or-later -# -# This script is a helper to allow netdata collect tc data. -# tc output parsing has been implemented in C, inside netdata -# This script allows setting names to dimensions.
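The contract between this helper and the C plugin is simple: the plugin runs the helper, parses `tc` output itself, and only consumes plain text lines from the helper's stdout to attach human-readable names to class ids. A minimal sketch of that output, based on the `setclassname()` and `WORKTIME` lines visible in the script (the class id and name below are illustrative, borrowed from the `tc_cls` example above):

```sh
# what tc-qos-helper.sh writes to stdout for the tc plugin to read;
# with tc_show="class" the format is: SETCLASSNAME <classid> <name>
echo "SETCLASSNAME 2:8 LowPriorityData"  # label class 2:8 on the dashboard
echo "WORKTIME 12"                       # ms spent collecting, charted by the plugin
```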
- -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - - -# ----------------------------------------------------------------------------- -# logging functions - -PROGRAM_FILE="$0" -PROGRAM_NAME="$(basename $0)" -PROGRAM_NAME="${PROGRAM_NAME/.plugin}" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=0 -debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" -} - -# ----------------------------------------------------------------------------- -# find /var/run/fireqos - -# the default -fireqos_run_dir="/var/run/fireqos" - -function realdir { - local r="$1" - local t=$(readlink "$r") - - while [ "$t" ] - do - r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t") - t=$(readlink "$r") - done - - dirname "$r" -} - -if [ ! -d "${fireqos_run_dir}" ] - then - - # the fireqos executable - we will use it to find its config - fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)" - - if [ ! -z "${fireqos}" ] - then - - fireqos_exec_dir="$(realdir ${fireqos})" - - if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ] - then - - LOCALSTATEDIR= - source "${fireqos_exec_dir}/install.config" - - if [ -d "${LOCALSTATEDIR}/run/fireqos" ] - then - fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" - else - warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)." - fi - else - warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'." - fi - else - warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin" - fi -fi - -# ----------------------------------------------------------------------------- - -[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" - -plugins_dir="${NETDATA_PLUGINS_DIR}" -tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)" - - -# ----------------------------------------------------------------------------- -# user configuration - -# time in seconds to refresh QoS class/qdisc names -qos_get_class_names_every=120 - -# time in seconds to exit - netdata will restart the script -qos_exit_every=3600 - -# what to use? classes or qdiscs? 
-tc_show="qdisc" # can also be "class" - - -# ----------------------------------------------------------------------------- -# check if we have a valid number for interval - -t=${1} -update_every=$((t)) -[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY} -[ $((update_every)) -lt 1 ] && update_every=1 - - -# ----------------------------------------------------------------------------- -# allow the user to override our defaults - -for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf" -do - if [ -f "${CONFIG}" ] - then - info "Loading config file '${CONFIG}'..." - source "${CONFIG}" - [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." - else - warning "Cannot find file '${CONFIG}'." - fi -done - -case "${tc_show}" in - qdisc|class) - ;; - - *) - error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." - tc_show="qdisc" - ;; -esac - - -# ----------------------------------------------------------------------------- -# default sleep function - -LOOPSLEEPMS_LASTWORK=0 -loopsleepms() { - sleep $1 -} - -# if found and included, this file overwrites loopsleepms() -# with a high resolution timer function for precise looping. -. "${plugins_dir}/loopsleepms.sh.inc" - - -# ----------------------------------------------------------------------------- -# final checks we can run - -if [ -z "${tc}" -o ! -x "${tc}" ] - then - fatal "cannot find command 'tc' in this system." -fi - -tc_devices= -fix_names= - -# ----------------------------------------------------------------------------- - -setclassname() { - if [ "${tc_show}" = "qdisc" ] - then - echo "SETCLASSNAME $4 $2" - else - echo "SETCLASSNAME $3 $2" - fi -} - -show_tc_cls() { - [ "${tc_show}" = "qdisc" ] && return 1 - - local x="${1}" - - if [ -f /etc/iproute2/tc_cls ] - then - local classid name rest - while read classid name rest - do - [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue - setclassname "" "${name}" "${classid}" - done &2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" + echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" } warning() { - log WARNING "${@}" + log WARNING "${@}" } error() { - log ERROR "${@}" + log ERROR "${@}" } info() { - log INFO "${@}" + log INFO "${@}" } fatal() { - log FATAL "${@}" - exit 1 + log FATAL "${@}" + exit 1 } debug=0 debug() { - [ $debug -eq 1 ] && log DEBUG "${@}" + [ $debug -eq 1 ] && log DEBUG "${@}" } # ----------------------------------------------------------------------------- @@ -60,59 +58,55 @@ debug() { # the default fireqos_run_dir="/var/run/fireqos" -function realdir { - local r="$1" - local t=$(readlink "$r") +function realdir() { + local r + local t + r="$1" + t="$(readlink "$r")" - while [ "$t" ] - do - r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t") - t=$(readlink "$r") - done + while [ "$t" ]; do + r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t") + t=$(readlink "$r") + done - dirname "$r" + dirname "$r" } -if [ ! -d "${fireqos_run_dir}" ] - then - - # the fireqos executable - we will use it to find its config - fireqos="$(which fireqos 2>/dev/null || command -v fireqos 2>/dev/null)" +if [ ! -d "${fireqos_run_dir}" ]; then - if [ ! 
-z "${fireqos}" ] - then + # the fireqos executable - we will use it to find its config + fireqos="$(command -v fireqos 2>/dev/null)" - fireqos_exec_dir="$(realdir ${fireqos})" + if [ -n "${fireqos}" ]; then - if [ ! -z "${fireqos_exec_dir}" -a "${fireqos_exec_dir}" != "." -a -f "${fireqos_exec_dir}/install.config" ] - then + fireqos_exec_dir="$(realdir "${fireqos}")" - LOCALSTATEDIR= - source "${fireqos_exec_dir}/install.config" + if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then + LOCALSTATEDIR= + #shellcheck source=/dev/null + source "${fireqos_exec_dir}/install.config" - if [ -d "${LOCALSTATEDIR}/run/fireqos" ] - then - fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" - else - warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)." - fi - else - warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'." - fi - else - warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin" - fi + if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then + fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos" + else + warning "FireQoS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)." + fi + else + warning "Although FireQoS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'." + fi + else + warning "FireQoS is not installed on this system. Use FireQoS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin" + fi fi # ----------------------------------------------------------------------------- [ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")" -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" +[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@" [ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" plugins_dir="${NETDATA_PLUGINS_DIR}" -tc="$(which tc 2>/dev/null || command -v tc 2>/dev/null)" - +tc="$(command -v tc 2>/dev/null)" # ----------------------------------------------------------------------------- # user configuration @@ -126,7 +120,6 @@ qos_exit_every=3600 # what to use? classes or qdiscs? 
tc_show="qdisc" # can also be "class" - # ----------------------------------------------------------------------------- # check if we have a valid number for interval @@ -135,52 +128,46 @@ update_every=$((t)) [ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY} [ $((update_every)) -lt 1 ] && update_every=1 - # ----------------------------------------------------------------------------- # allow the user to override our defaults -for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf" -do - if [ -f "${CONFIG}" ] - then - info "Loading config file '${CONFIG}'..." - source "${CONFIG}" - [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." - else - warning "Cannot find file '${CONFIG}'." - fi +for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do + if [ -f "${CONFIG}" ]; then + info "Loading config file '${CONFIG}'..." + #shellcheck source=/dev/null + source "${CONFIG}" || error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi done case "${tc_show}" in - qdisc|class) - ;; +qdisc | class) ;; - *) - error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." - tc_show="qdisc" - ;; +*) + error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'." + tc_show="qdisc" + ;; esac - # ----------------------------------------------------------------------------- # default sleep function LOOPSLEEPMS_LASTWORK=0 loopsleepms() { - sleep $1 + sleep "$1" } # if found and included, this file overwrites loopsleepms() # with a high resolution timer function for precise looping. +#shellcheck source=/dev/null . "${plugins_dir}/loopsleepms.sh.inc" - # ----------------------------------------------------------------------------- # final checks we can run -if [ -z "${tc}" -o ! -x "${tc}" ] - then - fatal "cannot find command 'tc' in this system." +if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then + fatal "cannot find command 'tc' in this system." 
fi tc_devices= @@ -189,94 +176,91 @@ fix_names= # ----------------------------------------------------------------------------- setclassname() { - if [ "${tc_show}" = "qdisc" ] - then - echo "SETCLASSNAME $4 $2" - else - echo "SETCLASSNAME $3 $2" - fi + if [ "${tc_show}" = "qdisc" ]; then + echo "SETCLASSNAME $4 $2" + else + echo "SETCLASSNAME $3 $2" + fi } show_tc_cls() { - [ "${tc_show}" = "qdisc" ] && return 1 - - local x="${1}" - - if [ -f /etc/iproute2/tc_cls ] - then - local classid name rest - while read classid name rest - do - [ -z "${classid}" -o -z "${name}" -o "${classid}" = "#" -o "${name}" = "#" -o "${classid:0:1}" = "#" -o "${name:0:1}" = "#" ] && continue - setclassname "" "${name}" "${classid}" - done /dev/null)" + [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}" + done } # update devices and class names @@ -289,27 +273,24 @@ exit_after=$((qos_exit_every / update_every)) c=0 gc=0 -while [ 1 ] -do - fix_names= - c=$((c + 1)) - gc=$((gc + 1)) +while true; do + fix_names= + c=$((c + 1)) + gc=$((gc + 1)) - if [ ${c} -le 1 -o ${c} -ge ${names_every} ] - then - c=1 - fix_names="YES" - find_tc_devices - fi + if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then + c=1 + fix_names="YES" + find_tc_devices + fi - for d in ${tc_devices} - do - show_tc ${d} - done + for d in ${tc_devices}; do + show_tc "${d}" + done - echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" + echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" - loopsleepms ${update_every} + loopsleepms ${update_every} - [ ${gc} -gt ${exit_after} ] && exit 0 + [ ${gc} -gt ${exit_after} ] && exit 0 done diff --git a/compile b/compile deleted file mode 100755 index 531136b06..000000000 --- a/compile +++ /dev/null @@ -1,347 +0,0 @@ -#! /bin/sh -# Wrapper for compilers which do not understand '-c -o'. - -scriptversion=2012-10-14.11; # UTC - -# Copyright (C) 1999-2013 Free Software Foundation, Inc. -# Written by Tom Tromey . -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# This file is maintained in Automake, please report -# bugs to or send patches to -# . - -nl=' -' - -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent tools from complaining about whitespace usage. -IFS=" "" $nl" - -file_conv= - -# func_file_conv build_file lazy -# Convert a $build file to $host form and store it in $file -# Currently only supports Windows hosts. If the determined conversion -# type is listed in (the comma separated) LAZY, no conversion will -# take place. 
-func_file_conv () -{ - file=$1 - case $file in - / | /[!/]*) # absolute file, and not a UNC file - if test -z "$file_conv"; then - # lazily determine how to convert abs files - case `uname -s` in - MINGW*) - file_conv=mingw - ;; - CYGWIN*) - file_conv=cygwin - ;; - *) - file_conv=wine - ;; - esac - fi - case $file_conv/,$2, in - *,$file_conv,*) - ;; - mingw/*) - file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` - ;; - cygwin/*) - file=`cygpath -m "$file" || echo "$file"` - ;; - wine/*) - file=`winepath -w "$file" || echo "$file"` - ;; - esac - ;; - esac -} - -# func_cl_dashL linkdir -# Make cl look for libraries in LINKDIR -func_cl_dashL () -{ - func_file_conv "$1" - if test -z "$lib_path"; then - lib_path=$file - else - lib_path="$lib_path;$file" - fi - linker_opts="$linker_opts -LIBPATH:$file" -} - -# func_cl_dashl library -# Do a library search-path lookup for cl -func_cl_dashl () -{ - lib=$1 - found=no - save_IFS=$IFS - IFS=';' - for dir in $lib_path $LIB - do - IFS=$save_IFS - if $shared && test -f "$dir/$lib.dll.lib"; then - found=yes - lib=$dir/$lib.dll.lib - break - fi - if test -f "$dir/$lib.lib"; then - found=yes - lib=$dir/$lib.lib - break - fi - if test -f "$dir/lib$lib.a"; then - found=yes - lib=$dir/lib$lib.a - break - fi - done - IFS=$save_IFS - - if test "$found" != yes; then - lib=$lib.lib - fi -} - -# func_cl_wrapper cl arg... -# Adjust compile command to suit cl -func_cl_wrapper () -{ - # Assume a capable shell - lib_path= - shared=: - linker_opts= - for arg - do - if test -n "$eat"; then - eat= - else - case $1 in - -o) - # configure might choose to run compile as 'compile cc -o foo foo.c'. - eat=1 - case $2 in - *.o | *.[oO][bB][jJ]) - func_file_conv "$2" - set x "$@" -Fo"$file" - shift - ;; - *) - func_file_conv "$2" - set x "$@" -Fe"$file" - shift - ;; - esac - ;; - -I) - eat=1 - func_file_conv "$2" mingw - set x "$@" -I"$file" - shift - ;; - -I*) - func_file_conv "${1#-I}" mingw - set x "$@" -I"$file" - shift - ;; - -l) - eat=1 - func_cl_dashl "$2" - set x "$@" "$lib" - shift - ;; - -l*) - func_cl_dashl "${1#-l}" - set x "$@" "$lib" - shift - ;; - -L) - eat=1 - func_cl_dashL "$2" - ;; - -L*) - func_cl_dashL "${1#-L}" - ;; - -static) - shared=false - ;; - -Wl,*) - arg=${1#-Wl,} - save_ifs="$IFS"; IFS=',' - for flag in $arg; do - IFS="$save_ifs" - linker_opts="$linker_opts $flag" - done - IFS="$save_ifs" - ;; - -Xlinker) - eat=1 - linker_opts="$linker_opts $2" - ;; - -*) - set x "$@" "$1" - shift - ;; - *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) - func_file_conv "$1" - set x "$@" -Tp"$file" - shift - ;; - *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) - func_file_conv "$1" mingw - set x "$@" "$file" - shift - ;; - *) - set x "$@" "$1" - shift - ;; - esac - fi - shift - done - if test -n "$linker_opts"; then - linker_opts="-link$linker_opts" - fi - exec "$@" $linker_opts - exit 1 -} - -eat= - -case $1 in - '') - echo "$0: No command. Try '$0 --help' for more information." 1>&2 - exit 1; - ;; - -h | --h*) - cat <<\EOF -Usage: compile [--help] [--version] PROGRAM [ARGS] - -Wrapper for compilers which do not understand '-c -o'. -Remove '-o dest.o' from ARGS, run PROGRAM with the remaining -arguments, and rename the output as expected. - -If you are trying to build a whole package this is not the -right script to run: please start by reading the file 'INSTALL'. - -Report bugs to . -EOF - exit $? - ;; - -v | --v*) - echo "compile $scriptversion" - exit $? - ;; - cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) - func_cl_wrapper "$@" # Doesn't return... 
- ;; -esac - -ofile= -cfile= - -for arg -do - if test -n "$eat"; then - eat= - else - case $1 in - -o) - # configure might choose to run compile as 'compile cc -o foo foo.c'. - # So we strip '-o arg' only if arg is an object. - eat=1 - case $2 in - *.o | *.obj) - ofile=$2 - ;; - *) - set x "$@" -o "$2" - shift - ;; - esac - ;; - *.c) - cfile=$1 - set x "$@" "$1" - shift - ;; - *) - set x "$@" "$1" - shift - ;; - esac - fi - shift -done - -if test -z "$ofile" || test -z "$cfile"; then - # If no '-o' option was seen then we might have been invoked from a - # pattern rule where we don't need one. That is ok -- this is a - # normal compilation that the losing compiler can handle. If no - # '.c' file was seen then we are probably linking. That is also - # ok. - exec "$@" -fi - -# Name of file we expect compiler to create. -cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` - -# Create the lock directory. -# Note: use '[/\\:.-]' here to ensure that we don't use the same name -# that we are using for the .o file. Also, base the name on the expected -# object file name, since that is what matters with a parallel build. -lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d -while true; do - if mkdir "$lockdir" >/dev/null 2>&1; then - break - fi - sleep 1 -done -# FIXME: race condition here if user kills between mkdir and trap. -trap "rmdir '$lockdir'; exit 1" 1 2 15 - -# Run the compile. -"$@" -ret=$? - -if test -f "$cofile"; then - test "$cofile" = "$ofile" || mv "$cofile" "$ofile" -elif test -f "${cofile}bj"; then - test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" -fi - -rmdir "$lockdir" -exit $ret - -# Local Variables: -# mode: shell-script -# sh-indentation: 2 -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/config.guess b/config.guess deleted file mode 100755 index b79252d6b..000000000 --- a/config.guess +++ /dev/null @@ -1,1558 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. -# Copyright 1992-2013 Free Software Foundation, Inc. - -timestamp='2013-06-10' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see <http://www.gnu.org/licenses/>. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). -# -# Originally written by Per Bothner. -# -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -# -# Please send patches with a ChangeLog entry to config-patches@gnu.org.
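Context for the `compile' wrapper deleted above: it exists for compilers that cannot handle `-c' and `-o' together. A rough sketch of its effect -- file names here are hypothetical, not taken from this patch:

    ./compile cc -c -o sub/foo.o sub/foo.c
    # behaves approximately like:
    #   mkdir foo_o.d          # lock dir named after the expected foo.o,
    #                          # serializing parallel compiles of this file
    #   cc -c sub/foo.c        # the losing compiler drops foo.o in the cwd
    #   mv foo.o sub/foo.o     # rename to the output requested by -o
    #   rmdir foo_o.d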
- - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to <config-patches@gnu.org>." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. -Copyright 1992-2013 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. - -# Portable tmp directory creation inspired by the Autoconf team. - -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -case "${UNAME_SYSTEM}" in -Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder.
- LIBC=gnu - - eval $set_cc_for_build - cat <<-EOF > $dummy.c - #include <features.h> - #if defined(__UCLIBC__) - LIBC=uclibc - #elif defined(__dietlibc__) - LIBC=dietlibc - #else - LIBC=gnu - #endif - EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - ;; -esac - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ELF__ - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" - exit ;; - *:Bitrig:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0.
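The `$set_cc_for_build' machinery above works by preprocessing a tiny C file and eval-ing whatever `LIBC=...' assignment survives, so the shell learns what the compiler saw. A minimal standalone sketch of the same pattern, assuming only a working `cc' and a writable /tmp (illustrative, not code from this patch):

    cat > /tmp/libc$$.c <<EOF
    #include <features.h>
    #if defined(__UCLIBC__)
    LIBC=uclibc
    #else
    LIBC=gnu
    #endif
    EOF
    # only the LIBC= line survives preprocessing; eval makes it a shell assignment
    eval `cc -E /tmp/libc$$.c 2>/dev/null | grep '^LIBC'`
    echo "detected libc: $LIBC"
    rm -f /tmp/libc$$.c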
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? - echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} - exit ;; - i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. - atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 
's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. - echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include <sys/systemcfg.h> - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]??
) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include <stdlib.h> - #include <unistd.h> - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep -q __LP64__ - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include <unistd.h> - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however.
*/ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in - amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - esac - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; 
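Several branches above (the NetBSD `__ELF__' check, the Solaris `IS_64BIT_ARCH' check, the HP-UX `__LP64__' check) share one trick: pipe a snippet into the compiler's preprocessor and grep its output. The same probe in isolation, assuming only a `cc' on PATH (illustrative, not part of config.guess):

    if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
        cc -E - 2>/dev/null | grep -q IS_64BIT_ARCH
    then
        echo "compiler generates 64-bit objects"
    fi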
- *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 - exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - i*:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - *:Interix*:*) - case ${UNAME_MACHINE} in - x86) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; - IA64) - echo ia64-unknown-interix${UNAME_RELEASE} - exit ;; - esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - aarch64_be:Linux:*:*) - UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - arm*:Linux:*:*) - eval $set_cc_for_build - if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_EABI__ - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - else - if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_PCS_VFP - then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi - else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf - fi - fi - exit ;; - avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} - exit ;; - frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } - ;; - or1k:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - padre:Linux:*:*) - echo sparc-unknown-linux-${LIBC} - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; - esac - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} - exit ;; - ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} - exit ;; - ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} - exit ;; - x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. 
- echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` - echo ${UNAME_MACHINE}-pc-isc$UNAME_REL - elif /bin/uname -X 2>/dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i586. - # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that - # this is a cross-build. - echo i586-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - NCR*:*:4.2:* | MPRAS*:*:4.2:*) - OS_REL='.3' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says <Richard.M.Bartel@ccMail.Census.GOV> - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes <hewes@openmarket.com>. - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. - echo i586-pc-beos - exit ;; - BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
- echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} - exit ;; - SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} - exit ;; - SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build - if test "$UNAME_PROCESSOR" = unknown ; then - UNAME_PROCESSOR=powerpc - fi - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi - fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} - exit ;; - NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. - if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' - exit ;; - i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos - exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros - exit ;; - x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx - exit ;; -esac - -eval $set_cc_for_build -cat >$dummy.c <<EOF -#ifdef _SEQUENT_ -# include <sys/types.h> -# include <sys/utsname.h> -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos".
Perhaps BFD should be changed, - I don't know.... */ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include <sys/param.h> - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif - -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif - -#if defined (NeXT) -#if !defined (__ARCHITECTURE__) -#define __ARCHITECTURE__ "m68k" -#endif - int version; - version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif - -#if defined (_SEQUENT_) - struct utsname un; - - uname(&un); - - if (strncmp(un.version, "V2", 2) == 0) { - printf ("i386-sequent-ptx2\n"); exit (0); - } - if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ - printf ("i386-sequent-ptx1\n"); exit (0); - } - printf ("i386-sequent-ptx\n"); exit (0); - -#endif - -#if defined (vax) -# if !defined (ultrix) -# include <sys/param.h> -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} -EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi - -cat >&2 <<EOF -$0: unable to guess system type - -This script, last modified $timestamp, has failed to recognize -the operating system you are using. It is advised that you -download the most up to date version of the config scripts from - - http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -and - http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -If the version you run ($0) is already up to date, please -send the following data and any information you think might be -pertinent to <config-patches@gnu.org> in order to provide the needed -information to handle your system.
- -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/config.h.in b/config.h.in deleted file mode 100644 index fd7bc5eb8..000000000 --- a/config.h.in +++ /dev/null @@ -1,329 +0,0 @@ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* compile and link with jemalloc */ -#undef ENABLE_JEMALLOC - -/* compile and link with tcmalloc */ -#undef ENABLE_TCMALLOC - -/* Define to 1 if you have the `accept4' function. */ -#undef HAVE_ACCEPT4 - -/* Define to 1 if you have the <arpa/nameser.h> header file. */ -#undef HAVE_ARPA_NAMESER_H - -/* libcap usability */ -#undef HAVE_CAPABILITY - -/* Define to 1 if the system has the type `clockid_t'. */ -#undef HAVE_CLOCKID_T - -/* Define to 1 if you have the `clock_gettime' function. */ -#undef HAVE_CLOCK_GETTIME - -/* Define to 1 if glibc mallinfo exists. */ -#undef HAVE_C_MALLINFO - -/* Define to 1 if glibc mallopt exists. */ -#undef HAVE_C_MALLOPT - -/* Define to 1 if C11-style _Generic works. */ -#undef HAVE_C__GENERIC - -/* Define to 1 if __atomic operations work. */ -#undef HAVE_C___ATOMIC - -/* Define to 1 if you have the declaration of `strerror_r', and to 0 if you - don't. */ -#undef HAVE_DECL_STRERROR_R - -/* ipmimonitoring usability */ -#undef HAVE_FREEIPMI - -/* Define to 1 if the system has the `format' function attribute */ -#undef HAVE_FUNC_ATTRIBUTE_FORMAT - -/* Define to 1 if the system has the `malloc' function attribute */ -#undef HAVE_FUNC_ATTRIBUTE_MALLOC - -/* Define to 1 if the system has the `noinline' function attribute */ -#undef HAVE_FUNC_ATTRIBUTE_NOINLINE - -/* Define to 1 if the system has the `noreturn' function attribute */ -#undef HAVE_FUNC_ATTRIBUTE_NORETURN - -/* Define to 1 if the system has the `returns_nonnull' function attribute */ -#undef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL - -/* Define to 1 if the system has the `warn_unused_result' function attribute - */ -#undef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT - -/* Define to 1 if you have the `getpriority' function. */ -#undef HAVE_GETPRIORITY - -/* Define to 1 if you have the <inttypes.h> header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the <jemalloc/jemalloc.h> header file. */ -#undef HAVE_JEMALLOC_JEMALLOC_H - -/* libmnl usability */ -#undef HAVE_LIBMNL - -/* libnetfilter_acct usability */ -#undef HAVE_LIBNETFILTER_ACCT - -/* Define to 1 if you have the <linux/netfilter/nfnetlink_conntrack.h> header - file. */ -#undef HAVE_LINUX_NETFILTER_NFNETLINK_CONNTRACK_H - -/* Define to 1 if -flto works. */ -#undef HAVE_LTO - -/* Define to 1 if you have the <memory.h> header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the <netdb.h> header file. */ -#undef HAVE_NETDB_H - -/* Define to 1 if you have the <netinet/in.h> header file.
*/ -#undef HAVE_NETINET_IN_H - -/* Define to 1 if you have the `nice' function. */ -#undef HAVE_NICE - -/* Define if you have POSIX threads libraries and header files. */ -#undef HAVE_PTHREAD - -/* Have PTHREAD_PRIO_INHERIT. */ -#undef HAVE_PTHREAD_PRIO_INHERIT - -/* Define to 1 if you have the `recvmmsg' function. */ -#undef HAVE_RECVMMSG - -/* Define to 1 if you have the <resolv.h> header file. */ -#undef HAVE_RESOLV_H - -/* Define to 1 if you have the `sched_getparam' function. */ -#undef HAVE_SCHED_GETPARAM - -/* Define to 1 if you have the `sched_getscheduler' function. */ -#undef HAVE_SCHED_GETSCHEDULER - -/* Define to 1 if you have the `sched_get_priority_max' function. */ -#undef HAVE_SCHED_GET_PRIORITY_MAX - -/* Define to 1 if you have the `sched_get_priority_min' function. */ -#undef HAVE_SCHED_GET_PRIORITY_MIN - -/* Define to 1 if you have the `sched_setscheduler' function. */ -#undef HAVE_SCHED_SETSCHEDULER - -/* Define 1 if you have setns() function */ -#undef HAVE_SETNS - -/* Define to 1 if you have the `setpriority' function. */ -#undef HAVE_SETPRIORITY - -/* Define to 1 if you have the <stdint.h> header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the <stdlib.h> header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the `strerror_r' function. */ -#undef HAVE_STRERROR_R - -/* Define to 1 if you have the <strings.h> header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the <string.h> header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if the system has the type `struct timespec'. */ -#undef HAVE_STRUCT_TIMESPEC - -/* Define to 1 if you have the <sys/mount.h> header file. */ -#undef HAVE_SYS_MOUNT_H - -/* Define to 1 if you have the <sys/prctl.h> header file. */ -#undef HAVE_SYS_PRCTL_H - -/* Define to 1 if you have the <sys/statfs.h> header file. */ -#undef HAVE_SYS_STATFS_H - -/* Define to 1 if you have the <sys/statvfs.h> header file. */ -#undef HAVE_SYS_STATVFS_H - -/* Define to 1 if you have the <sys/stat.h> header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the <sys/types.h> header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the <sys/vfs.h> header file. */ -#undef HAVE_SYS_VFS_H - -/* Define to 1 if you have the <unistd.h> header file. */ -#undef HAVE_UNISTD_H - -/* nfacct plugin usability */ -#undef INTERNAL_PLUGIN_NFACCT - -/* Define to 1 if `major', `minor', and `makedev' are declared in <sys/mkdev.h>. - */ -#undef MAJOR_IN_MKDEV - -/* Define to 1 if `major', `minor', and `makedev' are declared in - <sys/sysmacros.h>. */ -#undef MAJOR_IN_SYSMACROS - -/* use this user to drop privileged */ -#undef NETDATA_USER - -/* uuid usability */ -#undef NETDATA_WITH_UUID - -/* zlib usability */ -#undef NETDATA_WITH_ZLIB - -/* Name of package */ -#undef PACKAGE - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to necessary symbol if this constant uses a non-standard name on - your system. */ -#undef PTHREAD_CREATE_JOINABLE - -/* The size of `void *', as computed by sizeof. */ -#undef SIZEOF_VOID_P - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* math usability */ -#undef STORAGE_WITH_MATH - -/* Define to 1 if strerror_r returns char *.
*/ -#undef STRERROR_R_CHAR_P - -/* Enable extensions on AIX 3, Interix. */ -#ifndef _ALL_SOURCE -# undef _ALL_SOURCE -#endif -/* Enable GNU extensions on systems that have them. */ -#ifndef _GNU_SOURCE -# undef _GNU_SOURCE -#endif -/* Enable threading extensions on Solaris. */ -#ifndef _POSIX_PTHREAD_SEMANTICS -# undef _POSIX_PTHREAD_SEMANTICS -#endif -/* Enable extensions on HP NonStop. */ -#ifndef _TANDEM_SOURCE -# undef _TANDEM_SOURCE -#endif -/* Enable general extensions on Solaris. */ -#ifndef __EXTENSIONS__ -# undef __EXTENSIONS__ -#endif - - -/* Version number of package */ -#undef VERSION - -/* Define to 1 if on MINIX. */ -#undef _MINIX - -/* Define to 2 if the system does not provide POSIX.1 features except with - this defined. */ -#undef _POSIX_1_SOURCE - -/* Define to 1 if you need to in order for `stat' and other things to work. */ -#undef _POSIX_SOURCE - -/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/types.h>, - <stdint.h>, or <inttypes.h> is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -#undef _UINT32_T - -/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/types.h>, - <stdint.h>, or <inttypes.h> is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -#undef _UINT64_T - -/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/types.h>, - <stdint.h>, or <inttypes.h> is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -#undef _UINT8_T - -/* Link/compile against jemalloc */ -#undef has_jemalloc - -/* Link/compile against tcmalloc */ -#undef has_tcmalloc - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. */ -#ifndef __cplusplus -#undef inline -#endif - -/* Define to the type of a signed integer type of width exactly 16 bits if - such a type exists and the standard includes do not define it. */ -#undef int16_t - -/* Define to the type of a signed integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -#undef int32_t - -/* Define to the type of a signed integer type of width exactly 64 bits if - such a type exists and the standard includes do not define it. */ -#undef int64_t - -/* Define to the type of a signed integer type of width exactly 8 bits if such - a type exists and the standard includes do not define it. */ -#undef int8_t - -/* gcc branch optimization */ -#undef likely - -/* jemalloc prefix */ -#undef prefix_jemalloc - -/* Define to the type of an unsigned integer type of width exactly 16 bits if - such a type exists and the standard includes do not define it. */ -#undef uint16_t - -/* Define to the type of an unsigned integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -#undef uint32_t - -/* Define to the type of an unsigned integer type of width exactly 64 bits if - such a type exists and the standard includes do not define it. */ -#undef uint64_t - -/* Define to the type of an unsigned integer type of width exactly 8 bits if - such a type exists and the standard includes do not define it. */ -#undef uint8_t - -/* gcc branch optimization */ -#undef unlikely diff --git a/config.sub b/config.sub deleted file mode 100755 index 9633db704..000000000 --- a/config.sub +++ /dev/null @@ -1,1791 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2013 Free Software Foundation, Inc.
- -timestamp='2013-08-10' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see <http://www.gnu.org/licenses/>. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches with a ChangeLog entry to config-patches@gnu.org. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to <config-patches@gnu.org>." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2013 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types.
- echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. - ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze*) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*178) - os=-lynxos178 - ;; - -lynx*5) - os=-lynxos5 - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. 
-case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 \ - | or1k | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. 
- 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 
- ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - 
basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - 
basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. 
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) - # Remember, each alternative MUST END IN *, to match a version number. - ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - c8051-*) - os=-elf - ;; - hexagon-*) - os=-elf - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or1k-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. - os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/configure b/configure deleted file mode 100755 index da5c6bf60..000000000 --- a/configure +++ /dev/null @@ -1,9668 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for netdata 1.11.1_rolling. -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. - if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. 
- alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." 
- else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. 
-as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # If we had to re-execute with $CONFIG_SHELL, we're ensured to have - # already done that, so ensure we don't try to do so again and fall - # in an infinite loop. This has already happened in practice. - _as_can_reexec=no; export _as_can_reexec - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='netdata' -PACKAGE_TARNAME='netdata' -PACKAGE_VERSION='1.11.1_rolling' -PACKAGE_STRING='netdata 1.11.1_rolling' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -ac_unique_file="daemon/main.c" -# Factoring default headers for most tests. -ac_includes_default="\ -#include <stdio.h> -#ifdef HAVE_SYS_TYPES_H -# include <sys/types.h> -#endif -#ifdef HAVE_SYS_STAT_H -# include <sys/stat.h> -#endif -#ifdef STDC_HEADERS -# include <stdlib.h> -# include <stddef.h> -#else -# ifdef HAVE_STDLIB_H -# include <stdlib.h> -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include <memory.h> -# endif -# include <string.h> -#endif -#ifdef HAVE_STRINGS_H -# include <strings.h> -#endif -#ifdef HAVE_INTTYPES_H -# include <inttypes.h> -#endif -#ifdef HAVE_STDINT_H -# include <stdint.h> -#endif -#ifdef HAVE_UNISTD_H -# include <unistd.h> -#endif" - -ac_header_list= -ac_func_list= -ac_subst_vars='am__EXEEXT_FALSE -am__EXEEXT_TRUE -LTLIBOBJS -LIBOBJS -OPTIONAL_IPMIMONITORING_LIBS -OPTIONAL_IPMIMONITORING_CFLAGS -OPTIONAL_LIBCAP_LIBS -OPTIONAL_LIBCAP_CFLAGS -OPTIONAL_UUID_LIBS -OPTIONAL_UUID_CLFAGS -OPTIONAL_ZLIB_LIBS -OPTIONAL_ZLIB_CLFAGS -OPTIONAL_NFACCT_LIBS -OPTIONAL_NFACCT_CLFAGS -OPTIONAL_MATH_LIBS -OPTIONAL_MATH_CLFAGS -webdir -pluginsdir -logdir -libconfigdir -configdir -pythondir -nodedir -chartsdir -cachedir -registrydir -varlibdir -build_target -ENABLE_PLUGIN_CGROUP_NETWORK_FALSE -ENABLE_PLUGIN_CGROUP_NETWORK_TRUE -ENABLE_PLUGIN_NFACCT_FALSE -ENABLE_PLUGIN_NFACCT_TRUE -LIBMNL_LIBS -LIBMNL_CFLAGS -NFACCT_LIBS -NFACCT_CFLAGS -ENABLE_PLUGIN_FREEIPMI_FALSE -ENABLE_PLUGIN_FREEIPMI_TRUE -IPMIMONITORING_LIBS -IPMIMONITORING_CFLAGS -ENABLE_PLUGIN_APPS_FALSE -ENABLE_PLUGIN_APPS_TRUE -ENABLE_CAPABILITY_FALSE -ENABLE_CAPABILITY_TRUE -LIBCAP_LIBS -LIBCAP_CFLAGS -has_tcmalloc -has_jemalloc -SSE_CANDIDATE -UUID_LIBS -UUID_CFLAGS -ZLIB_LIBS -ZLIB_CFLAGS -MATH_LIBS -MATH_CFLAGS -PTHREAD_CFLAGS -PTHREAD_LIBS -PTHREAD_CC -ax_pthread_config -LINUX_FALSE -LINUX_TRUE -MACOS_FALSE -MACOS_TRUE -FREEBSD_FALSE -FREEBSD_TRUE -EGREP -GREP -CPP -PKG_CONFIG_LIBDIR -PKG_CONFIG_PATH -PKG_CONFIG -am__fastdepCC_FALSE -am__fastdepCC_TRUE -CCDEPMODE -am__nodep -AMDEPBACKSLASH -AMDEP_FALSE -AMDEP_TRUE -am__quote -am__include -DEPDIR -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -host_os -host_vendor -host_cpu -host -build_os -build_vendor -build_cpu -build -AM_BACKSLASH -AM_DEFAULT_VERBOSITY -AM_DEFAULT_V -AM_V -am__untar -am__tar -AMTAR -am__leading_dot -SET_MAKE -AWK -mkdir_p -MKDIR_P -INSTALL_STRIP_PROGRAM -STRIP -install_sh -MAKEINFO -AUTOHEADER -AUTOMAKE -AUTOCONF -ACLOCAL -VERSION -PACKAGE -CYGPATH_W -am__isrc -INSTALL_DATA -INSTALL_SCRIPT -INSTALL_PROGRAM -PACKAGE_RPM_RELEASE -PACKAGE_RPM_VERSION -MAINT -MAINTAINER_MODE_FALSE -MAINTAINER_MODE_TRUE -target_alias -host_alias
-build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -enable_maintainer_mode -enable_silent_rules -enable_dependency_tracking -enable_plugin_nfacct -enable_plugin_freeipmi -enable_pedantic -enable_accept4 -with_webdir -with_libcap -with_zlib -with_math -with_user -enable_x86_sse -enable_lto -with_jemalloc_prefix -with_jemalloc -with_tcmalloc_lib -with_tcmalloc -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -PKG_CONFIG -PKG_CONFIG_PATH -PKG_CONFIG_LIBDIR -CPP -MATH_CFLAGS -MATH_LIBS -ZLIB_CFLAGS -ZLIB_LIBS -UUID_CFLAGS -UUID_LIBS -SSE_CANDIDATE -LIBCAP_CFLAGS -LIBCAP_LIBS -IPMIMONITORING_CFLAGS -IPMIMONITORING_LIBS -NFACCT_CFLAGS -NFACCT_LIBS -LIBMNL_CFLAGS -LIBMNL_LIBS' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? 
"missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. 
-# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures netdata 1.11.1_rolling to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. - -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/netdata] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF - -Program names: - --program-prefix=PREFIX prepend PREFIX to installed program names - --program-suffix=SUFFIX append SUFFIX to installed program names - --program-transform-name=PROGRAM run sed PROGRAM on installed program names - -System types: - --build=BUILD configure for building on BUILD [guessed] - --host=HOST cross-compile to build programs to run on HOST [BUILD] -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of netdata 1.11.1_rolling:";; - esac - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-maintainer-mode - enable make rules and dependencies not useful (and - sometimes confusing) to the 
casual installer - --enable-silent-rules less verbose build output (undo: "make V=1") - --disable-silent-rules verbose build output (undo: "make V=0") - --enable-dependency-tracking - do not reject slow dependency extractors - --disable-dependency-tracking - speeds up one-time build - --enable-plugin-nfacct enable nfacct plugin, requires running netdata as - root [default disabled] - --enable-plugin-freeipmi - enable freeipmi plugin [default autodetect] - --enable-pedantic enable pedantic compiler warnings [default disabled] - --disable-accept4 System does not have accept4 [default autodetect] - --disable-x86-sse SSE/SSE2 optimizations on x86 [default enabled] - --disable-lto Link Time Optimizations [default autodetect] - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-webdir location of webdir [PKGDATADIR/web] - --with-libcap build with libcap [default autodetect] - --without-zlib build without zlib [default enabled] - --without-math build without math [default enabled] - --with-user use this user to drop privilege [default nobody] - --with-jemalloc-prefix=PREFIX - Specify the jemalloc prefix [default=""] - --with-jemalloc=DIR use a specific jemalloc library - --with-tcmalloc-lib specify the tcmalloc library to use - [default=tcmalloc] - --with-tcmalloc=DIR use the tcmalloc library - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a - nonstandard directory <lib dir> - LIBS libraries to pass to the linker, e.g. -l<library> - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if - you have headers in a nonstandard directory <include dir> - PKG_CONFIG path to pkg-config utility - PKG_CONFIG_PATH - directories to add to pkg-config's search path - PKG_CONFIG_LIBDIR - path overriding pkg-config's built-in search path - CPP C preprocessor - MATH_CFLAGS C compiler flags for math - MATH_LIBS linker flags for math - ZLIB_CFLAGS C compiler flags for ZLIB, overriding pkg-config - ZLIB_LIBS linker flags for ZLIB, overriding pkg-config - UUID_CFLAGS C compiler flags for UUID, overriding pkg-config - UUID_LIBS linker flags for UUID, overriding pkg-config - SSE_CANDIDATE - C compiler flags for SSE - LIBCAP_CFLAGS - C compiler flags for LIBCAP, overriding pkg-config - LIBCAP_LIBS linker flags for LIBCAP, overriding pkg-config - IPMIMONITORING_CFLAGS - C compiler flags for IPMIMONITORING, overriding pkg-config - IPMIMONITORING_LIBS - linker flags for IPMIMONITORING, overriding pkg-config - NFACCT_CFLAGS - C compiler flags for NFACCT, overriding pkg-config - NFACCT_LIBS linker flags for NFACCT, overriding pkg-config - LIBMNL_CFLAGS - C compiler flags for LIBMNL, overriding pkg-config - LIBMNL_LIBS linker flags for LIBMNL, overriding pkg-config - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix.
- ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -netdata configure 1.11.1_rolling -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? 
- if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_type - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case <limits.h> declares $2. - For example, HP-UX 11i <limits.h> declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer <limits.h> to <assert.h> if __STDC__ is defined, since - <limits.h> exists even on freestanding compilers.
*/ - -#ifdef __STDC__ -# include <limits.h> -#else -# include <assert.h> -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - -# ac_fn_c_find_intX_t LINENO BITS VAR -# ----------------------------------- -# Finds a signed integer type with width BITS, setting cache variable VAR -# accordingly. -ac_fn_c_find_intX_t () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 -$as_echo_n "checking for int$2_t... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - # Order is important - never check a type that is potentially smaller - # than half of the expected target width. - for ac_type in int$2_t 'int' 'long int' \ - 'long long int' 'short int' 'signed char'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default - enum { N = $2 / 2 - 1 }; -int -main () -{ -static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default - enum { N = $2 / 2 - 1 }; -int -main () -{ -static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1) - < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - case $ac_type in #( - int$2_t) : - eval "$3=yes" ;; #( - *) : - eval "$3=\$ac_type" ;; -esac -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if eval test \"x\$"$3"\" = x"no"; then : - -else - break -fi - done -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_find_intX_t - -# ac_fn_c_find_uintX_t LINENO BITS VAR -# ------------------------------------ -# Finds an unsigned integer type with width BITS, setting cache variable VAR -# accordingly. -ac_fn_c_find_uintX_t () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 -$as_echo_n "checking for uint$2_t... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - # Order is important - never check a type that is potentially smaller - # than half of the expected target width.
- for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \ - 'unsigned long long int' 'unsigned short int' 'unsigned char'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default -int -main () -{ -static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - case $ac_type in #( - uint$2_t) : - eval "$3=yes" ;; #( - *) : - eval "$3=\$ac_type" ;; -esac -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if eval test \"x\$"$3"\" = x"no"; then : - -else - break -fi - done -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_find_uintX_t - -# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES -# --------------------------------------------- -# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR -# accordingly. -ac_fn_c_check_decl () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - as_decl_name=`echo $2|sed 's/ *(.*//'` - as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 -$as_echo_n "checking whether $as_decl_name is declared... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_decl - -# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES -# -------------------------------------------- -# Tries to find the compile-time value of EXPR in a program that includes -# INCLUDES, setting VAR accordingly. Returns whether the value could be -# computed -ac_fn_c_compute_int () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= 0)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=0 ac_mid=0 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid; break -else - as_fn_arith $ac_mid + 1 && ac_lo=$as_val - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) < 0)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=-1 ac_mid=-1 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= $ac_mid)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=$ac_mid; break -else - as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - ac_lo= ac_hi= -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0; -return test_array [0]; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid -else - as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in #(( -?*) eval "$3=\$ac_lo"; ac_retval=0 ;; -'') ac_retval=1 ;; -esac - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -static long int longval () { return $2; } -static unsigned long int ulongval () { return $2; } -#include <stdio.h> -#include <stdlib.h> -int -main () -{ - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - return 1; - if (($2) < 0) - { - long int i = longval (); - if (i != ($2)) - return 1; - fprintf (f, "%ld", i); - } - else - { - unsigned long int i = ulongval (); - if (i != ($2)) - return 1; - fprintf (f, "%lu", i); - } - /* Do not output a trailing newline, as this causes \r\n confusion - on some platforms. */ - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - echo >>conftest.val; read $3 <conftest.val; ac_retval=0 -else - ac_retval=1 -fi -rm -f conftest.val - fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_compute_int -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by netdata $as_me 1.11.1_rolling, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform.
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -as_fn_append ac_header_list " sys/prctl.h" -as_fn_append ac_header_list " sys/vfs.h" -as_fn_append ac_header_list " sys/statfs.h" -as_fn_append ac_header_list " sys/statvfs.h" -as_fn_append ac_header_list " sys/mount.h" -as_fn_append ac_func_list " accept4" -as_fn_append ac_header_list " linux/netfilter/nfnetlink_conntrack.h" -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. 
- ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 -$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } - # Check whether --enable-maintainer-mode was given. -if test "${enable_maintainer_mode+set}" = set; then : - enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval -else - USE_MAINTAINER_MODE=no -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 -$as_echo "$USE_MAINTAINER_MODE" >&6; } - if test $USE_MAINTAINER_MODE = yes; then - MAINTAINER_MODE_TRUE= - MAINTAINER_MODE_FALSE='#' -else - MAINTAINER_MODE_TRUE='#' - MAINTAINER_MODE_FALSE= -fi - - MAINT=$MAINTAINER_MODE_TRUE - - -if test x"$USE_MAINTAINER_MODE" = xyes; then -{ $as_echo "$as_me:${as_lineno-$LINENO}: ***************** MAINTAINER MODE *****************" >&5 -$as_echo "$as_me: ***************** MAINTAINER MODE *****************" >&6;} -PACKAGE_BUILT_DATE=$(date '+%d %b %Y') -fi - -PACKAGE_RPM_VERSION="1.11.0" - - - - -# ----------------------------------------------------------------------------- -# autoconf initialization - -ac_aux_dir= -for ac_dir in . 
"$srcdir"/.; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? "cannot find install-sh, install.sh, or shtool in . \"$srcdir\"/." "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - -ac_config_headers="$ac_config_headers config.h" - - - - - - - -am__api_version='1.14' - -# Find a good install program. We prefer a C program (faster), -# so one script is as good as another. But avoid the broken or -# incompatible versions: -# SysV /etc/install, /usr/sbin/install -# SunOS /usr/etc/install -# IRIX /sbin/install -# AIX /bin/install -# AmigaOS /C/install, which installs bootblocks on floppy discs -# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag -# AFS /usr/afsws/bin/install, which mishandles nonexistent args -# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" -# OS/2's system install, which has a completely different semantic -# ./install, which can be erroneously created by make from ./install.sh. -# Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 -$as_echo_n "checking for a BSD-compatible install... " >&6; } -if test -z "$INSTALL"; then -if ${ac_cv_path_install+:} false; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ - /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ - /usr/ucb/* ) ;; - *) - # OSF1 and SCO ODT 3.0 have their own names for install. - # Don't use installbsd from OSF since it installs stuff as root - # by default. - for ac_prog in ginstall scoinst install; do - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. 
- : - else - rm -rf conftest.one conftest.two conftest.dir - echo one > conftest.one - echo two > conftest.two - mkdir conftest.dir - if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && - test -s conftest.one && test -s conftest.two && - test -s conftest.dir/conftest.one && - test -s conftest.dir/conftest.two - then - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi - fi - done - done - ;; -esac - - done -IFS=$as_save_IFS - -rm -rf conftest.one conftest.two conftest.dir - -fi - if test "${ac_cv_path_install+set}" = set; then - INSTALL=$ac_cv_path_install - else - # As a last resort, use the slow shell script. Don't cache a - # value for INSTALL within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - INSTALL=$ac_install_sh - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 -$as_echo "$INSTALL" >&6; } - -# Use test -z because SunOS4 sh mishandles braces in ${var-val}. -# It thinks the first close brace ends the variable substitution. -test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' - -test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' - -test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 -$as_echo_n "checking whether build environment is sane... " >&6; } -# Reject unsafe characters in $srcdir or the absolute working directory -# name. Accept space and tab only in the latter. -am_lf=' -' -case `pwd` in - *[\\\"\#\$\&\'\`$am_lf]*) - as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; -esac -case $srcdir in - *[\\\"\#\$\&\'\`$am_lf\ \ ]*) - as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; -esac - -# Do 'set' in a subshell so we don't clobber the current shell's -# arguments. Must try -L first in case configure is actually a -# symlink; some systems play weird games with the mod time of symlinks -# (eg FreeBSD returns the mod time of the symlink's containing -# directory). -if ( - am_has_slept=no - for am_try in 1 2; do - echo "timestamp, slept: $am_has_slept" > conftest.file - set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` - if test "$*" = "X"; then - # -L didn't work. - set X `ls -t "$srcdir/configure" conftest.file` - fi - if test "$*" != "X $srcdir/configure conftest.file" \ - && test "$*" != "X conftest.file $srcdir/configure"; then - - # If neither matched, then we have a broken ls. This can happen - # if, for instance, CONFIG_SHELL is bash and it inherits a - # broken ls alias from the environment. This has actually - # happened. Such a system could not be considered "sane". - as_fn_error $? "ls -t appears to fail. Make sure there is not a broken - alias in your environment" "$LINENO" 5 - fi - if test "$2" = conftest.file || test $am_try -eq 2; then - break - fi - # Just in case. - sleep 1 - am_has_slept=yes - done - test "$2" = conftest.file - ) -then - # Ok. - : -else - as_fn_error $? "newly created file is older than distributed files! -Check your system clock" "$LINENO" 5 -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -# If we didn't sleep, we still need to ensure time stamps of config.status and -# generated files are strictly newer. -am_sleep_pid= -if grep 'slept: no' conftest.file >/dev/null 2>&1; then - ( sleep 1 ) & - am_sleep_pid=$! 
-fi - -rm -f conftest.file - -test "$program_prefix" != NONE && - program_transform_name="s&^&$program_prefix&;$program_transform_name" -# Use a double $ so make ignores it. -test "$program_suffix" != NONE && - program_transform_name="s&\$&$program_suffix&;$program_transform_name" -# Double any \ or $. -# By default was `s,x,x', remove it if useless. -ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' -program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` - -# expand $ac_aux_dir to an absolute path -am_aux_dir=`cd $ac_aux_dir && pwd` - -if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac -fi -# Use eval to expand $SHELL -if eval "$MISSING --is-lightweight"; then - am_missing_run="$MISSING " -else - am_missing_run= - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 -$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} -fi - -if test x"${install_sh+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; - *) - install_sh="\${SHELL} $am_aux_dir/install-sh" - esac -fi - -# Installed binaries are usually stripped using 'strip' when the user -# run "make install-strip". However 'strip' might not be the right -# tool to use in cross-compilation environments, therefore Automake -# will honor the 'STRIP' environment variable to overrule this program. -if test "$cross_compiling" != no; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_STRIP" = x; then - STRIP=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - STRIP=$ac_ct_STRIP - fi -else - STRIP="$ac_cv_prog_STRIP" -fi - -fi -INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 -$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } -if test -z "$MKDIR_P"; then - if ${ac_cv_path_mkdir+:} false; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in mkdir gmkdir; do - for ac_exec_ext in '' $ac_executable_extensions; do - as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue - case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( - 'mkdir (GNU coreutils) '* | \ - 'mkdir (coreutils) '* | \ - 'mkdir (fileutils) '4.1*) - ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext - break 3;; - esac - done - done - done -IFS=$as_save_IFS - -fi - - test -d ./--version && rmdir ./--version - if test "${ac_cv_path_mkdir+set}" = set; then - MKDIR_P="$ac_cv_path_mkdir -p" - else - # As a last resort, use the slow shell script. Don't cache a - # value for MKDIR_P within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - MKDIR_P="$ac_install_sh -d" - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 -$as_echo "$MKDIR_P" >&6; } - -for ac_prog in gawk mawk nawk awk -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_AWK+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AWK"; then - ac_cv_prog_AWK="$AWK" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_AWK="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AWK=$ac_cv_prog_AWK -if test -n "$AWK"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 -$as_echo "$AWK" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$AWK" && break -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 -$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } -set x ${MAKE-make} -ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat >conftest.make <<\_ACEOF -SHELL = /bin/sh -all: - @echo '@@@%%%=$(MAKE)=@@@%%%' -_ACEOF -# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. -case `${MAKE-make} -f conftest.make 2>/dev/null` in - *@@@%%%=?*=@@@%%%*) - eval ac_cv_prog_make_${ac_make}_set=yes;; - *) - eval ac_cv_prog_make_${ac_make}_set=no;; -esac -rm -f conftest.make -fi -if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - SET_MAKE= -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - SET_MAKE="MAKE=${MAKE-make}" -fi - -rm -rf .tst 2>/dev/null -mkdir .tst 2>/dev/null -if test -d .tst; then - am__leading_dot=. -else - am__leading_dot=_ -fi -rmdir .tst 2>/dev/null - -# Check whether --enable-silent-rules was given. -if test "${enable_silent_rules+set}" = set; then : - enableval=$enable_silent_rules; -fi - -case $enable_silent_rules in # ((( - yes) AM_DEFAULT_VERBOSITY=0;; - no) AM_DEFAULT_VERBOSITY=1;; - *) AM_DEFAULT_VERBOSITY=1;; -esac -am_make=${MAKE-make} -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 -$as_echo_n "checking whether $am_make supports nested variables... " >&6; } -if ${am_cv_make_support_nested_variables+:} false; then : - $as_echo_n "(cached) " >&6 -else - if $as_echo 'TRUE=$(BAR$(V)) -BAR0=false -BAR1=true -V=1 -am__doit: - @$(TRUE) -.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then - am_cv_make_support_nested_variables=yes -else - am_cv_make_support_nested_variables=no -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 -$as_echo "$am_cv_make_support_nested_variables" >&6; } -if test $am_cv_make_support_nested_variables = yes; then - AM_V='$(V)' - AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' -else - AM_V=$AM_DEFAULT_VERBOSITY - AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY -fi -AM_BACKSLASH='\' - -if test "`cd $srcdir && pwd`" != "`pwd`"; then - # Use -I$(srcdir) only when $(srcdir) != ., so that make's output - # is not polluted with repeated "-I." - am__isrc=' -I$(srcdir)' - # test to see if srcdir already configured - if test -f $srcdir/config.status; then - as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 - fi -fi - -# test whether we have cygpath -if test -z "$CYGPATH_W"; then - if (cygpath --version) >/dev/null 2>/dev/null; then - CYGPATH_W='cygpath -w' - else - CYGPATH_W=echo - fi -fi - - -# Define the identity of the package. - PACKAGE='netdata' - VERSION='1.11.1_rolling' - - -cat >>confdefs.h <<_ACEOF -#define PACKAGE "$PACKAGE" -_ACEOF - - -cat >>confdefs.h <<_ACEOF -#define VERSION "$VERSION" -_ACEOF - -# Some tools Automake needs. - -ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} - - -AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} - - -AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} - - -AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} - - -MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} - -# For better backward compatibility. To be removed once Automake 1.9.x -# dies out for good. For more background, see: -# -# -mkdir_p='$(MKDIR_P)' - -# We need awk for the "check" target. The system "awk" is bad on -# some platforms. -# Always define AMTAR for backward compatibility. 
Yes, it's still used -# in the wild :-( We should find a proper way to deprecate it ... -AMTAR='$${TAR-tar}' - - -# We'll loop over all known methods to create a tar archive until one works. -_am_tools='gnutar pax cpio none' - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a pax tar archive" >&5 -$as_echo_n "checking how to create a pax tar archive... " >&6; } - - # Go ahead even if we have the value already cached. We do so because we - # need to set the values for the 'am__tar' and 'am__untar' variables. - _am_tools=${am_cv_prog_tar_pax-$_am_tools} - - for _am_tool in $_am_tools; do - case $_am_tool in - gnutar) - for _am_tar in tar gnutar gtar; do - { echo "$as_me:$LINENO: $_am_tar --version" >&5 - ($_am_tar --version) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && break - done - am__tar="$_am_tar --format=posix -chf - "'"$$tardir"' - am__tar_="$_am_tar --format=posix -chf - "'"$tardir"' - am__untar="$_am_tar -xf -" - ;; - plaintar) - # Must skip GNU tar: if it does not support --format= it doesn't create - # ustar tarball either. - (tar --version) >/dev/null 2>&1 && continue - am__tar='tar chf - "$$tardir"' - am__tar_='tar chf - "$tardir"' - am__untar='tar xf -' - ;; - pax) - am__tar='pax -L -x pax -w "$$tardir"' - am__tar_='pax -L -x pax -w "$tardir"' - am__untar='pax -r' - ;; - cpio) - am__tar='find "$$tardir" -print | cpio -o -H pax -L' - am__tar_='find "$tardir" -print | cpio -o -H pax -L' - am__untar='cpio -i -H pax -d' - ;; - none) - am__tar=false - am__tar_=false - am__untar=false - ;; - esac - - # If the value was cached, stop now. We just wanted to have am__tar - # and am__untar set. - test -n "${am_cv_prog_tar_pax}" && break - - # tar/untar a dummy directory, and stop if the command works. - rm -rf conftest.dir - mkdir conftest.dir - echo GrepMe > conftest.dir/file - { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5 - (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - rm -rf conftest.dir - if test -s conftest.tar; then - { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5 - ($am__untar <conftest.tar) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - { echo "$as_me:$LINENO: cat conftest.dir/file" >&5 - (cat conftest.dir/file) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } - grep GrepMe conftest.dir/file >/dev/null 2>&1 && break - fi - done - rm -rf conftest.dir - - if ${am_cv_prog_tar_pax+:} false; then : - $as_echo_n "(cached) " >&6 -else - am_cv_prog_tar_pax=$_am_tool -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_pax" >&5 -$as_echo "$am_cv_prog_tar_pax" >&6; } - - - - - - -# POSIX will say in a future version that running "rm -f" with no argument -# is OK; and we want to be able to make that assumption in our Makefile -# recipes. So use an aggressive probe to check that the usage we want is -# actually supported "in the wild" to an acceptable degree. -# See automake bug#10828. -# To make any issue more visible, cause the running configure to be aborted -# by default if the 'rm' program in use doesn't match our expectations; the -# user can still override this though. -if rm -f && rm -fr && rm -rf; then : OK; else - cat >&2 <<'END' -Oops! - -Your 'rm' program seems unable to run without file operands specified -on the command line, even when the '-f' option is present. 
This is contrary -to the behaviour of most rm programs out there, and not conforming with -the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542> - -Please tell bug-automake@gnu.org about your system, including the value -of your $PATH and any error possibly output before this message. This -can help us improve future automake versions. - -END - if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then - echo 'Configuration will proceed anyway, since you have set the' >&2 - echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 - echo >&2 - else - cat >&2 <<'END' -Aborting the configuration process, to ensure you take notice of the issue. - -You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: <http://www.gnu.org/software/coreutils/>. - -If you want to complete the configuration process using your problematic -'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM -to "yes", and re-run configure. - -END - as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 - fi -fi - - # Check whether --enable-silent-rules was given. -if test "${enable_silent_rules+set}" = set; then : - enableval=$enable_silent_rules; -fi - -case $enable_silent_rules in # ((( - yes) AM_DEFAULT_VERBOSITY=0;; - no) AM_DEFAULT_VERBOSITY=1;; - *) AM_DEFAULT_VERBOSITY=0;; -esac -am_make=${MAKE-make} -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 -$as_echo_n "checking whether $am_make supports nested variables... " >&6; } -if ${am_cv_make_support_nested_variables+:} false; then : - $as_echo_n "(cached) " >&6 -else - if $as_echo 'TRUE=$(BAR$(V)) -BAR0=false -BAR1=true -V=1 -am__doit: - @$(TRUE) -.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then - am_cv_make_support_nested_variables=yes -else - am_cv_make_support_nested_variables=no -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 -$as_echo "$am_cv_make_support_nested_variables" >&6; } -if test $am_cv_make_support_nested_variables = yes; then - AM_V='$(V)' - AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' -else - AM_V=$AM_DEFAULT_VERBOSITY - AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY -fi -AM_BACKSLASH='\' - - -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if ${ac_cv_build+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_build_alias=$build_alias -test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` -test "x$ac_build_alias" = x && - as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } -case $ac_cv_build in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; -esac -build=$ac_cv_build -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_build -shift -build_cpu=$1 -build_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -build_os=$* -IFS=$ac_save_IFS -case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... 
" >&6; } -if ${ac_cv_host+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. 
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. 
- break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <stdio.h> -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 -$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } -if ${am_cv_prog_cc_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF - # Make sure it works both with $CC and with simple cc. - # Following AC_PROG_CC_C_O, we do the test twice because some - # compilers refuse to overwrite an existing .o file with -o, - # though they will create one. - am_cv_prog_cc_c_o=yes - for am_i in 1 2; do - if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 - ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } \ - && test -f conftest2.$ac_objext; then - : OK - else - am_cv_prog_cc_c_o=no - break - fi - done - rm -f core conftest* - unset am_i -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 -$as_echo "$am_cv_prog_cc_c_o" >&6; } -if test "$am_cv_prog_cc_c_o" != yes; then - # Losing compiler, so override with the script. - # FIXME: It is wrong to rewrite CC. - # But if we don't then we get into trouble of one sort or another. 
- # A longer-term fix would be to have automake use am__CC in this case, - # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" - CC="$am_aux_dir/compile $CC" -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -DEPDIR="${am__leading_dot}deps" - -ac_config_commands="$ac_config_commands depfiles" - - -am_make=${MAKE-make} -cat > confinc << 'END' -am__doit: - @echo this is the am__doit target -.PHONY: am__doit -END -# If we don't find an include directive, just comment out the code. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 -$as_echo_n "checking for style of include used by $am_make... " >&6; } -am__include="#" -am__quote= -_am_result=none -# First try GNU make style include. -echo "include confinc" > confmf -# Ignore all kinds of additional output from 'make'. -case `$am_make -s -f confmf 2> /dev/null` in #( -*the\ am__doit\ target*) - am__include=include - am__quote= - _am_result=GNU - ;; -esac -# Now try BSD make style include. -if test "$am__include" = "#"; then - echo '.include "confinc"' > confmf - case `$am_make -s -f confmf 2> /dev/null` in #( - *the\ am__doit\ target*) - am__include=.include - am__quote="\"" - _am_result=BSD - ;; - esac -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 -$as_echo "$_am_result" >&6; } -rm -f confinc confmf - -# Check whether --enable-dependency-tracking was given. -if test "${enable_dependency_tracking+set}" = set; then : - enableval=$enable_dependency_tracking; -fi - -if test "x$enable_dependency_tracking" != xno; then - am_depcomp="$ac_aux_dir/depcomp" - AMDEPBACKSLASH='\' - am__nodep='_no' -fi - if test "x$enable_dependency_tracking" != xno; then - AMDEP_TRUE= - AMDEP_FALSE='#' -else - AMDEP_TRUE='#' - AMDEP_FALSE= -fi - - - -depcc="$CC" am_compiler_list= - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 -$as_echo_n "checking dependency style of $depcc... " >&6; } -if ${am_cv_CC_dependencies_compiler_type+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then - # We make a subdir and do the tests there. Otherwise we can end up - # making bogus files that we don't know about and never remove. For - # instance it was reported that on HP-UX the gcc test will end up - # making a dummy file named 'D' -- because '-MD' means "put the output - # in D". - rm -rf conftest.dir - mkdir conftest.dir - # Copy depcomp to subdir because otherwise we won't find it if we're - # using a relative directory. - cp "$am_depcomp" conftest.dir - cd conftest.dir - # We will build objects and dependencies in a subdirectory because - # it helps to detect inapplicable dependency modes. For instance - # both Tru64's cc and ICC support -MD to output dependencies as a - # side effect of compilation, but ICC will put the dependencies in - # the current directory while Tru64 will put them in the object - # directory. 
- mkdir sub - - am_cv_CC_dependencies_compiler_type=none - if test "$am_compiler_list" = ""; then - am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` - fi - am__universal=false - case " $depcc " in #( - *\ -arch\ *\ -arch\ *) am__universal=true ;; - esac - - for depmode in $am_compiler_list; do - # Setup a source with many dependencies, because some compilers - # like to wrap large dependency lists on column 80 (with \), and - # we should not choose a depcomp mode which is confused by this. - # - # We need to recreate these files for each test, as the compiler may - # overwrite some of them when testing with obscure command lines. - # This happens at least with the AIX C compiler. - : > sub/conftest.c - for i in 1 2 3 4 5 6; do - echo '#include "conftst'$i'.h"' >> sub/conftest.c - # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with - # Solaris 10 /bin/sh. - echo '/* dummy */' > sub/conftst$i.h - done - echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf - - # We check with '-c' and '-o' for the sake of the "dashmstdout" - # mode. It turns out that the SunPro C++ compiler does not properly - # handle '-M -o', and we need to detect this. Also, some Intel - # versions had trouble with output in subdirs. - am__obj=sub/conftest.${OBJEXT-o} - am__minus_obj="-o $am__obj" - case $depmode in - gcc) - # This depmode causes a compiler race in universal mode. - test "$am__universal" = false || continue - ;; - nosideeffect) - # After this tag, mechanisms are not by side-effect, so they'll - # only be used when explicitly requested. - if test "x$enable_dependency_tracking" = xyes; then - continue - else - break - fi - ;; - msvc7 | msvc7msys | msvisualcpp | msvcmsys) - # This compiler won't grok '-c -o', but also, the minuso test has - # not run yet. These depmodes are late enough in the game, and - # so weak that their functioning should not be impacted. - am__obj=conftest.${OBJEXT-o} - am__minus_obj= - ;; - none) break ;; - esac - if depmode=$depmode \ - source=sub/conftest.c object=$am__obj \ - depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ - $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ - >/dev/null 2>conftest.err && - grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && - grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && - grep $am__obj sub/conftest.Po > /dev/null 2>&1 && - ${MAKE-make} -s -f confmf > /dev/null 2>&1; then - # icc doesn't choke on unknown options, it will just issue warnings - # or remarks (even with -Werror). So we grep stderr for any message - # that says an option was ignored or not supported. - # When given -MP, icc 7.0 and 7.1 complain thusly: - # icc: Command line warning: ignoring option '-M'; no argument required - # The diagnosis changed in icc 8.0: - # icc: Command line remark: option '-MP' not supported - if (grep 'ignoring option' conftest.err || - grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else - am_cv_CC_dependencies_compiler_type=$depmode - break - fi - fi - done - - cd .. 
- rm -rf conftest.dir -else - am_cv_CC_dependencies_compiler_type=none -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 -$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } -CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type - - if - test "x$enable_dependency_tracking" != xno \ - && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then - am__fastdepCC_TRUE= - am__fastdepCC_FALSE='#' -else - am__fastdepCC_TRUE='#' - am__fastdepCC_FALSE= -fi - - - - - - - - - - - -if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. -set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_PKG_CONFIG+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $PKG_CONFIG in - [\\/]* | ?:[\\/]*) - ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - ;; -esac -fi -PKG_CONFIG=$ac_cv_path_PKG_CONFIG -if test -n "$PKG_CONFIG"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 -$as_echo "$PKG_CONFIG" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_path_PKG_CONFIG"; then - ac_pt_PKG_CONFIG=$PKG_CONFIG - # Extract the first word of "pkg-config", so it can be a program name with args. -set dummy pkg-config; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $ac_pt_PKG_CONFIG in - [\\/]* | ?:[\\/]*) - ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - ;; -esac -fi -ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG -if test -n "$ac_pt_PKG_CONFIG"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 -$as_echo "$ac_pt_PKG_CONFIG" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_pt_PKG_CONFIG" = x; then - PKG_CONFIG="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - PKG_CONFIG=$ac_pt_PKG_CONFIG - fi -else - PKG_CONFIG="$ac_cv_path_PKG_CONFIG" -fi - -fi -if test -n "$PKG_CONFIG"; then - _pkg_min_version=0.9.0 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 -$as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } - if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - PKG_CONFIG="" - fi -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since - # <limits.h> exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include <limits.h> -#else -# include <assert.h> -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <ac_nonexistent.h> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
-rm -f conftest.i conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-  break
-fi
-
-    done
-    ac_cv_prog_CPP=$CPP
-
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
-$as_echo "$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-  Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.i conftest.$ac_ext
-
-  # OK, works on sane cases. Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.i conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.i conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details" "$LINENO" 5; }
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if ${ac_cv_path_GREP+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -z "$GREP"; then
-  ac_path_GREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in grep ggrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
-      as_fn_executable_p "$ac_path_GREP" || continue
-# Check for GNU ac_path_GREP and select it if it is found.
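[Editor's note: the grep selection starting above takes any candidate whose --version mentions GNU straight away; every other candidate is scored by how long an input line it can still match while honouring -e (the second pattern, '-(cannot match)-', also proves -e copes with a pattern that begins with a dash). The line doubles on every pass and the best scorer wins. A standalone sketch of the scoring for one candidate, assuming plain grep; cmp -s stands in for the diff the real test uses.]

printf %s 0123456789 > conftest.in             # one line, no trailing newline
count=0
while :; do
  cat conftest.in conftest.in > conftest.tmp   # double the line each pass
  mv conftest.tmp conftest.in
  cp conftest.in conftest.nl
  echo 'GREP' >> conftest.nl                   # the long line now ends in GREP
  grep -e 'GREP$' -e '-(cannot match)-' < conftest.nl > conftest.out 2>/dev/null || break
  cmp -s conftest.out conftest.nl || break     # output must equal input
  count=$((count + 1))
  [ $count -gt 10 ] && break                   # 10*(2^10) chars is considered enough
done
echo "candidate grep scored $count"
rm -f conftest.in conftest.tmp conftest.nl conftest.out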
- # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... 
" >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - - - ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" -if test "x$ac_cv_header_minix_config_h" = xyes; then : - MINIX=yes -else - MINIX= -fi - - - if test "$MINIX" = yes; then - -$as_echo "#define _POSIX_SOURCE 1" >>confdefs.h - - -$as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h - - -$as_echo "#define _MINIX 1" >>confdefs.h - - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 -$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } -if ${ac_cv_safe_to_define___extensions__+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -# define __EXTENSIONS__ 1 - $ac_includes_default -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_safe_to_define___extensions__=yes -else - ac_cv_safe_to_define___extensions__=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 -$as_echo "$ac_cv_safe_to_define___extensions__" >&6; } - test $ac_cv_safe_to_define___extensions__ = yes && - $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h - - $as_echo "#define _ALL_SOURCE 1" >>confdefs.h - - $as_echo "#define _GNU_SOURCE 1" >>confdefs.h - - $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h - - $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h - - - - -# ----------------------------------------------------------------------------- -# configurable options - -# Check whether --enable-plugin-nfacct was given. -if test "${enable_plugin_nfacct+set}" = set; then : - enableval=$enable_plugin_nfacct; -else - enable_plugin_nfacct="no" - -fi - -# Check whether --enable-plugin-freeipmi was given. -if test "${enable_plugin_freeipmi+set}" = set; then : - enableval=$enable_plugin_freeipmi; -else - enable_plugin_freeipmi="detect" - -fi - -# Check whether --enable-pedantic was given. -if test "${enable_pedantic+set}" = set; then : - enableval=$enable_pedantic; -else - enable_pedantic="no" - -fi - -# Check whether --enable-accept4 was given. -if test "${enable_accept4+set}" = set; then : - enableval=$enable_accept4; -else - enable_accept4="detect" - -fi - - -# Check whether --with-webdir was given. -if test "${with_webdir+set}" = set; then : - withval=$with_webdir; webdir="${withval}" -else - webdir="\$(pkgdatadir)/web" - -fi - - -# Check whether --with-libcap was given. -if test "${with_libcap+set}" = set; then : - withval=$with_libcap; -else - with_libcap="detect" - -fi - - -# Check whether --with-zlib was given. -if test "${with_zlib+set}" = set; then : - withval=$with_zlib; -else - with_zlib="yes" - -fi - - -# Check whether --with-math was given. -if test "${with_math+set}" = set; then : - withval=$with_math; -else - with_math="yes" - -fi - - -# Check whether --with-user was given. -if test "${with_user+set}" = set; then : - withval=$with_user; -else - with_user="nobody" - -fi - -# Check whether --enable-x86-sse was given. -if test "${enable_x86_sse+set}" = set; then : - enableval=$enable_x86_sse; -else - enable_x86_sse="yes" - -fi - -# Check whether --enable-lto was given. -if test "${enable_lto+set}" = set; then : - enableval=$enable_lto; -else - enable_lto="detect" - -fi - - - -# ----------------------------------------------------------------------------- -# netdata required checks - -# fails on centos6 -#AX_CHECK_ENABLE_DEBUG() - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((returns_nonnull))" >&5 -$as_echo_n "checking for __attribute__((returns_nonnull))... " >&6; } -if ${ax_cv_have_func_attribute_returns_nonnull+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - - void *foo( void ) __attribute__((returns_nonnull)); - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_returns_nonnull=no -else - ax_cv_have_func_attribute_returns_nonnull=yes -fi -else - ax_cv_have_func_attribute_returns_nonnull=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_returns_nonnull" >&5 -$as_echo "$ax_cv_have_func_attribute_returns_nonnull" >&6; } - - if test yes = $ax_cv_have_func_attribute_returns_nonnull; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL 1 -_ACEOF - -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((malloc))" >&5 -$as_echo_n "checking for __attribute__((malloc))... " >&6; } -if ${ax_cv_have_func_attribute_malloc+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - - void *foo( void ) __attribute__((malloc)); - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_malloc=no -else - ax_cv_have_func_attribute_malloc=yes -fi -else - ax_cv_have_func_attribute_malloc=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_malloc" >&5 -$as_echo "$ax_cv_have_func_attribute_malloc" >&6; } - - if test yes = $ax_cv_have_func_attribute_malloc; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_MALLOC 1 -_ACEOF - -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((noreturn))" >&5 -$as_echo_n "checking for __attribute__((noreturn))... " >&6; } -if ${ax_cv_have_func_attribute_noreturn+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - - void foo( void ) __attribute__((noreturn)); - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_noreturn=no -else - ax_cv_have_func_attribute_noreturn=yes -fi -else - ax_cv_have_func_attribute_noreturn=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_noreturn" >&5 -$as_echo "$ax_cv_have_func_attribute_noreturn" >&6; } - - if test yes = $ax_cv_have_func_attribute_noreturn; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_NORETURN 1 -_ACEOF - -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((noinline))" >&5 -$as_echo_n "checking for __attribute__((noinline))... " >&6; } -if ${ax_cv_have_func_attribute_noinline+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - - __attribute__((noinline)) int foo( void ) { return 0; } - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_noinline=no -else - ax_cv_have_func_attribute_noinline=yes -fi -else - ax_cv_have_func_attribute_noinline=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_noinline" >&5 -$as_echo "$ax_cv_have_func_attribute_noinline" >&6; } - - if test yes = $ax_cv_have_func_attribute_noinline; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_NOINLINE 1 -_ACEOF - -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((format))" >&5 -$as_echo_n "checking for __attribute__((format))... " >&6; } -if ${ax_cv_have_func_attribute_format+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - - int foo(const char *p, ...) __attribute__((format(printf, 1, 2))); - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_format=no -else - ax_cv_have_func_attribute_format=yes -fi -else - ax_cv_have_func_attribute_format=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_format" >&5 -$as_echo "$ax_cv_have_func_attribute_format" >&6; } - - if test yes = $ax_cv_have_func_attribute_format; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_FORMAT 1 -_ACEOF - -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((warn_unused_result))" >&5 -$as_echo_n "checking for __attribute__((warn_unused_result))... " >&6; } -if ${ax_cv_have_func_attribute_warn_unused_result+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - - int foo( void ) __attribute__((warn_unused_result)); - -int -main () -{ - - ; - return 0; -} - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if test -s conftest.err; then : - ax_cv_have_func_attribute_warn_unused_result=no -else - ax_cv_have_func_attribute_warn_unused_result=yes -fi -else - ax_cv_have_func_attribute_warn_unused_result=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_func_attribute_warn_unused_result" >&5 -$as_echo "$ax_cv_have_func_attribute_warn_unused_result" >&6; } - - if test yes = $ax_cv_have_func_attribute_warn_unused_result; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT 1 -_ACEOF - -fi - - - - -ac_fn_c_check_type "$LINENO" "struct timespec" "ac_cv_type_struct_timespec" "#include -" -if test "x$ac_cv_type_struct_timespec" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_TIMESPEC 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "clockid_t" "ac_cv_type_clockid_t" "#include -" -if test "x$ac_cv_type_clockid_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_CLOCKID_T 1 -_ACEOF - - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 -$as_echo_n "checking for library containing clock_gettime... 
" >&6; } -if ${ac_cv_search_clock_gettime+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char clock_gettime (); -int -main () -{ -return clock_gettime (); - ; - return 0; -} -_ACEOF -for ac_lib in '' rt posix4; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_clock_gettime=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_clock_gettime+:} false; then : - break -fi -done -if ${ac_cv_search_clock_gettime+:} false; then : - -else - ac_cv_search_clock_gettime=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 -$as_echo "$ac_cv_search_clock_gettime" >&6; } -ac_res=$ac_cv_search_clock_gettime -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -for ac_func in clock_gettime -do : - ac_fn_c_check_func "$LINENO" "clock_gettime" "ac_cv_func_clock_gettime" -if test "x$ac_cv_func_clock_gettime" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_CLOCK_GETTIME 1 -_ACEOF - -fi -done - -for ac_func in sched_setscheduler sched_getscheduler sched_getparam sched_get_priority_min sched_get_priority_max getpriority setpriority nice -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - -for ac_func in recvmmsg -do : - ac_fn_c_check_func "$LINENO" "recvmmsg" "ac_cv_func_recvmmsg" -if test "x$ac_cv_func_recvmmsg" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_RECVMMSG 1 -_ACEOF - -fi -done - - -ac_fn_c_find_intX_t "$LINENO" "8" "ac_cv_c_int8_t" -case $ac_cv_c_int8_t in #( - no|yes) ;; #( - *) - -cat >>confdefs.h <<_ACEOF -#define int8_t $ac_cv_c_int8_t -_ACEOF -;; -esac - -ac_fn_c_find_intX_t "$LINENO" "16" "ac_cv_c_int16_t" -case $ac_cv_c_int16_t in #( - no|yes) ;; #( - *) - -cat >>confdefs.h <<_ACEOF -#define int16_t $ac_cv_c_int16_t -_ACEOF -;; -esac - -ac_fn_c_find_intX_t "$LINENO" "32" "ac_cv_c_int32_t" -case $ac_cv_c_int32_t in #( - no|yes) ;; #( - *) - -cat >>confdefs.h <<_ACEOF -#define int32_t $ac_cv_c_int32_t -_ACEOF -;; -esac - -ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t" -case $ac_cv_c_int64_t in #( - no|yes) ;; #( - *) - -cat >>confdefs.h <<_ACEOF -#define int64_t $ac_cv_c_int64_t -_ACEOF -;; -esac - -ac_fn_c_find_uintX_t "$LINENO" "8" "ac_cv_c_uint8_t" -case $ac_cv_c_uint8_t in #( - no|yes) ;; #( - *) - -$as_echo "#define _UINT8_T 1" >>confdefs.h - - -cat >>confdefs.h <<_ACEOF -#define uint8_t $ac_cv_c_uint8_t -_ACEOF -;; - esac - -ac_fn_c_find_uintX_t "$LINENO" "16" "ac_cv_c_uint16_t" -case $ac_cv_c_uint16_t in #( - no|yes) ;; #( - *) - - -cat >>confdefs.h <<_ACEOF -#define uint16_t $ac_cv_c_uint16_t -_ACEOF -;; - esac - -ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t" -case $ac_cv_c_uint32_t in #( - no|yes) ;; #( - *) - -$as_echo "#define _UINT32_T 1" >>confdefs.h - - -cat >>confdefs.h <<_ACEOF -#define 
uint32_t $ac_cv_c_uint32_t -_ACEOF -;; - esac - -ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t" -case $ac_cv_c_uint64_t in #( - no|yes) ;; #( - *) - -$as_echo "#define _UINT64_T 1" >>confdefs.h - - -cat >>confdefs.h <<_ACEOF -#define uint64_t $ac_cv_c_uint64_t -_ACEOF -;; - esac - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 -$as_echo_n "checking for inline... " >&6; } -if ${ac_cv_c_inline+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_c_inline=no -for ac_kw in inline __inline__ __inline; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifndef __cplusplus -typedef int foo_t; -static $ac_kw foo_t static_foo () {return 0; } -$ac_kw foo_t foo () {return 0; } -#endif - -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_inline=$ac_kw -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - test "$ac_cv_c_inline" != no && break -done - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 -$as_echo "$ac_cv_c_inline" >&6; } - -case $ac_cv_c_inline in - inline | yes) ;; - *) - case $ac_cv_c_inline in - no) ac_val=;; - *) ac_val=$ac_cv_c_inline;; - esac - cat >>confdefs.h <<_ACEOF -#ifndef __cplusplus -#define inline $ac_val -#endif -_ACEOF - ;; -esac - -ac_fn_c_check_decl "$LINENO" "strerror_r" "ac_cv_have_decl_strerror_r" "$ac_includes_default" -if test "x$ac_cv_have_decl_strerror_r" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_STRERROR_R $ac_have_decl -_ACEOF - -for ac_func in strerror_r -do : - ac_fn_c_check_func "$LINENO" "strerror_r" "ac_cv_func_strerror_r" -if test "x$ac_cv_func_strerror_r" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_STRERROR_R 1 -_ACEOF - -fi -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char *" >&5 -$as_echo_n "checking whether strerror_r returns char *... " >&6; } -if ${ac_cv_func_strerror_r_char_p+:} false; then : - $as_echo_n "(cached) " >&6 -else - - ac_cv_func_strerror_r_char_p=no - if test $ac_cv_have_decl_strerror_r = yes; then - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default -int -main () -{ - - char buf[100]; - char x = *strerror_r (0, buf, sizeof buf); - char *p = strerror_r (0, buf, sizeof buf); - return !p || x; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_func_strerror_r_char_p=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - else - # strerror_r is not declared. Choose between - # systems that have relatively inaccessible declarations for the - # function. BeOS and DEC UNIX 4.0 fall in this category, but the - # former has a strerror_r that returns char*, while the latter - # has a strerror_r that returns `int'. - # This test should segfault on the DEC system. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default - extern char *strerror_r (); -int -main () -{ -char buf[100]; - char x = *strerror_r (0, buf, sizeof buf); - return ! 
isalpha (x); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_func_strerror_r_char_p=yes -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - - fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_strerror_r_char_p" >&5 -$as_echo "$ac_cv_func_strerror_r_char_p" >&6; } -if test $ac_cv_func_strerror_r_char_p = yes; then - -$as_echo "#define STRERROR_R_CHAR_P 1" >>confdefs.h - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Generic" >&5 -$as_echo_n "checking for _Generic... " >&6; } -if ${ac_cv_c__Generic+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int - main (int argc, char **argv) - { - int a = _Generic (argc, int: argc = 1); - int *b = &_Generic (argc, default: argc); - char ***c = _Generic (argv, int: argc, default: argv ? &argv : 0); - _Generic (1 ? 0 : b, int: a, default: b) = &argc; - _Generic (a = 1, default: a) = 3; - return a + !b + !c; - } - -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c__Generic=yes -else - ac_cv_c__Generic=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c__Generic" >&5 -$as_echo "$ac_cv_c__Generic" >&6; } -if test $ac_cv_c__Generic = yes; then - -$as_echo "#define HAVE_C__GENERIC 1" >>confdefs.h - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __atomic" >&5 -$as_echo_n "checking for __atomic... " >&6; } -if ${ac_cv_c___atomic+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int - main (int argc, char **argv) - { - volatile unsigned long ul1 = 1, ul2 = 0, ul3 = 2; - __atomic_load_n(&ul1, __ATOMIC_SEQ_CST); - __atomic_compare_exchange(&ul1, &ul2, &ul3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&ul1, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_sub(&ul3, 1, __ATOMIC_SEQ_CST); - __atomic_or_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); - __atomic_and_fetch(&ul1, ul2, __ATOMIC_SEQ_CST); - volatile unsigned long long ull1 = 1, ull2 = 0, ull3 = 2; - __atomic_load_n(&ull1, __ATOMIC_SEQ_CST); - __atomic_compare_exchange(&ull1, &ull2, &ull3, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); - __atomic_fetch_add(&ull1, 1, __ATOMIC_SEQ_CST); - __atomic_fetch_sub(&ull3, 1, __ATOMIC_SEQ_CST); - __atomic_or_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); - __atomic_and_fetch(&ull1, ull2, __ATOMIC_SEQ_CST); - return 0; - } - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_c___atomic=yes -else - ac_cv_c___atomic=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c___atomic" >&5 -$as_echo "$ac_cv_c___atomic" >&6; } -if test $ac_cv_c___atomic = yes; then - -$as_echo "#define HAVE_C___ATOMIC 1" >>confdefs.h - -fi - -# AC_C_STMT_EXPR -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 -$as_echo_n "checking size of void *... 
" >&6; } -if ${ac_cv_sizeof_void_p+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_void_p" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (void *) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_void_p=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 -$as_echo "$ac_cv_sizeof_void_p" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_VOID_P $ac_cv_sizeof_void_p -_ACEOF - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether sys/types.h defines makedev" >&5 -$as_echo_n "checking whether sys/types.h defines makedev... " >&6; } -if ${ac_cv_header_sys_types_h_makedev+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -return makedev(0, 0); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_header_sys_types_h_makedev=yes -else - ac_cv_header_sys_types_h_makedev=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_types_h_makedev" >&5 -$as_echo "$ac_cv_header_sys_types_h_makedev" >&6; } - -if test $ac_cv_header_sys_types_h_makedev = no; then -ac_fn_c_check_header_mongrel "$LINENO" "sys/mkdev.h" "ac_cv_header_sys_mkdev_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_mkdev_h" = xyes; then : - -$as_echo "#define MAJOR_IN_MKDEV 1" >>confdefs.h - -fi - - - - if test $ac_cv_header_sys_mkdev_h = no; then - ac_fn_c_check_header_mongrel "$LINENO" "sys/sysmacros.h" "ac_cv_header_sys_sysmacros_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_sysmacros_h" = xyes; then : - -$as_echo "#define MAJOR_IN_SYSMACROS 1" >>confdefs.h - -fi - - - fi -fi - -for ac_header in sys/types.h netinet/in.h arpa/nameser.h netdb.h resolv.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_NETINET_IN_H -# include /* inet_ functions / structs */ -#endif -#ifdef HAVE_ARPA_NAMESER_H -# include /* DNS HEADER struct */ -#endif -#ifdef HAVE_NETDB_H -# include -#endif -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - - - - - - for ac_header in $ac_header_list -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - - - - - - - - - - - - - - - - -if test "${enable_accept4}" != "no"; then - - - - for ac_func in $ac_func_list -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - - - -fi - -# ----------------------------------------------------------------------------- -# operating system detection - -{ $as_echo 
"$as_me:${as_lineno-$LINENO}: checking operating system" >&5 -$as_echo_n "checking operating system... " >&6; } -case "$host_os" in -freebsd*) - build_target=freebsd - build_target_id=2 - CFLAGS="${CFLAGS} -I/usr/local/include" - ;; -darwin*) - build_target=macos - build_target_id=3 - LDFLAGS="${LDFLAGS} -framework CoreFoundation -framework IOKit" - ;; -*) - build_target=linux - build_target_id=1 - ;; -esac - - if test "${build_target}" = "freebsd"; then - FREEBSD_TRUE= - FREEBSD_FALSE='#' -else - FREEBSD_TRUE='#' - FREEBSD_FALSE= -fi - - if test "${build_target}" = "macos"; then - MACOS_TRUE= - MACOS_FALSE='#' -else - MACOS_TRUE='#' - MACOS_FALSE= -fi - - if test "${build_target}" = "linux"; then - LINUX_TRUE= - LINUX_FALSE='#' -else - LINUX_TRUE='#' - LINUX_FALSE= -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${build_target} with id ${build_target_id}" >&5 -$as_echo "${build_target} with id ${build_target_id}" >&6; } - - -# ----------------------------------------------------------------------------- -# pthreads - - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ax_pthread_ok=no - -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. - -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 -$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char pthread_join (); -int -main () -{ -return pthread_join (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test x"$ax_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" -fi - -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). - -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. - -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" - -# The ordering *is* (sometimes) important. 
Some notes on the -# individual items follow: - -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... -mt is also the pthreads flag for HP/aCC -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) - -case ${host_os} in - solaris*) - - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: - - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" - ;; - - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" - ;; -esac - -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5 -$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; } -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int foo(void); -int -main () -{ -foo() - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - ax_pthread_extra_flags= - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -CFLAGS="$save_CFLAGS" - -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do - - case $flag in - none) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 -$as_echo_n "checking whether pthreads work without any flags... " >&6; } - ;; - - -*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 -$as_echo_n "checking whether pthreads work with $flag... " >&6; } - PTHREAD_CFLAGS="$flag" - ;; - - pthread-config) - # Extract the first word of "pthread-config", so it can be a program name with args. -set dummy pthread-config; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ax_pthread_config+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ax_pthread_config"; then - ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. 
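[Editor's note: in the candidate loop above, entries beginning with "-" are tried as compiler flags, bare words as -l<name> libraries, "none" as no flag at all, and "pthread-config" as the helper program for the GNU Pth emulation whose lookup starts here. Whatever the candidate, the decisive test is the same: a program touching the core pthread entry points must link and run. A reduced sketch, assuming cc and the -pthread candidate; the real test also exercises pthread_cleanup_push, the one routine Solaris libc does not stub.]

cat > conftest.c <<'EOF'
#include <pthread.h>
static void *start_routine(void *a) { return a; }
int main(void)
{
    pthread_t th;
    pthread_attr_t attr;
    pthread_create(&th, 0, start_routine, 0);
    pthread_attr_init(&attr);
    return pthread_join(th, 0);   /* 0 when the thread ran and joined */
}
EOF
cc -pthread conftest.c -o conftest && ./conftest && echo "-pthread works"
rm -f conftest.c conftest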
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ax_pthread_config="yes" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" -fi -fi -ax_pthread_config=$ac_cv_prog_ax_pthread_config -if test -n "$ax_pthread_config"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 -$as_echo "$ax_pthread_config" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - if test x"$ax_pthread_config" = xno; then continue; fi - PTHREAD_CFLAGS="`pthread-config --cflags`" - PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 -$as_echo_n "checking for the pthreads library -l$flag... " >&6; } - PTHREAD_LIBS="-l$flag" - ;; - esac - - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" - - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - static void routine(void *a) { a = 0; } - static void *start_routine(void *a) { return a; } -int -main () -{ -pthread_t th; pthread_attr_t attr; - pthread_create(&th, 0, start_routine, 0); - pthread_join(th, 0); - pthread_attr_init(&attr); - pthread_cleanup_push(routine, 0); - pthread_cleanup_pop(0) /* ; */ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test "x$ax_pthread_ok" = xyes; then - break; - fi - - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" -done -fi - -# Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - - # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 -$as_echo_n "checking for joinable pthread attribute... " >&6; } - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -int -main () -{ -int attr = $attr; return attr /* ; */ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - attr_name=$attr; break -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 -$as_echo "$attr_name" >&6; } - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then - -cat >>confdefs.h <<_ACEOF -#define PTHREAD_CREATE_JOINABLE $attr_name -_ACEOF - - fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 -$as_echo_n "checking if more special flags are required for pthreads... " >&6; } - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5 -$as_echo "$flag" >&6; } - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5 -$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; } -if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then : - $as_echo_n "(cached) " >&6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -int i = PTHREAD_PRIO_INHERIT; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_cv_PTHREAD_PRIO_INHERIT=yes -else - ax_cv_PTHREAD_PRIO_INHERIT=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5 -$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; } - if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then : - -$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h - -fi - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then - case $host_os in - aix*) - case "x/$CC" in #( - x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) : - #handle absolute path differently from PATH based program lookup - case "x$CC" in #( - x/*) : - if as_fn_executable_p ${CC}_r; then : - PTHREAD_CC="${CC}_r" -fi ;; #( - *) : - for ac_prog in ${CC}_r -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_PTHREAD_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$PTHREAD_CC"; then - ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
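[Editor's note: this last hunt only runs on AIX with a vendor compiler. The joinable-attribute and extra-flag checks above are not enough there, because threaded code must be built with the reentrant compiler driver (cc_r, xlc_r, ...), which implies -D_THREAD_SAFE and links the reentrant libraries; the loop interrupted here searches $PATH for ${CC}_r and falls back to $CC. The essence, as a loose sketch:]

case "$CC" in
  c89|c99|cc|xlc|xlc_v6|xlc128) PTHREAD_CC="${CC}_r" ;;
  *)                            PTHREAD_CC="$CC"     ;;
esac
echo "threaded objects will be compiled with: ${PTHREAD_CC:-cc}"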
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_PTHREAD_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -PTHREAD_CC=$ac_cv_prog_PTHREAD_CC -if test -n "$PTHREAD_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 -$as_echo "$PTHREAD_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$PTHREAD_CC" && break -done -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - ;; -esac ;; #( - *) : - ;; -esac - ;; - esac - fi -fi - -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - - - - - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then - -$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h - - : -else - ax_pthread_ok=no - as_fn_error $? "Cannot initialize pthread environment" "$LINENO" 5 -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -LIBS="${PTHREAD_LIBS} ${LIBS}" -CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS}" -CC="${PTHREAD_CC}" - - -# ----------------------------------------------------------------------------- -# libm - - - -if test -z "${MATH_LIBS}"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sin in -lm" >&5 -$as_echo_n "checking for sin in -lm... " >&6; } -if ${ac_cv_lib_m_sin+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lm $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char sin (); -int -main () -{ -return sin (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_m_sin=yes -else - ac_cv_lib_m_sin=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_sin" >&5 -$as_echo "$ac_cv_lib_m_sin" >&6; } -if test "x$ac_cv_lib_m_sin" = xyes; then : - MATH_LIBS="-lm" - -fi - -fi -test "${with_math}" = "yes" -a -z "${MATH_LIBS}" && as_fn_error $? "math required but not found" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if libm should be used" >&5 -$as_echo_n "checking if libm should be used... " >&6; } -if test "${with_math}" != "no" -a ! -z "${MATH_LIBS}"; then - with_math="yes" - -$as_echo "#define STORAGE_WITH_MATH 1" >>confdefs.h - - OPTIONAL_MATH_CFLAGS="${MATH_CFLAGS}" - OPTIONAL_MATH_LIBS="${MATH_LIBS}" -else - with_math="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_math}" >&5 -$as_echo "${with_math}" >&6; } - - -# ----------------------------------------------------------------------------- -# zlib - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ZLIB" >&5 -$as_echo_n "checking for ZLIB... 
" >&6; } - -if test -n "$ZLIB_CFLAGS"; then - pkg_cv_ZLIB_CFLAGS="$ZLIB_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"zlib\""; } >&5 - ($PKG_CONFIG --exists --print-errors "zlib") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_ZLIB_CFLAGS=`$PKG_CONFIG --cflags "zlib" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$ZLIB_LIBS"; then - pkg_cv_ZLIB_LIBS="$ZLIB_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"zlib\""; } >&5 - ($PKG_CONFIG --exists --print-errors "zlib") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_ZLIB_LIBS=`$PKG_CONFIG --libs "zlib" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - ZLIB_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "zlib" 2>&1` - else - ZLIB_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "zlib" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$ZLIB_PKG_ERRORS" >&5 - - have_zlib=no - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - have_zlib=no - -else - ZLIB_CFLAGS=$pkg_cv_ZLIB_CFLAGS - ZLIB_LIBS=$pkg_cv_ZLIB_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - have_zlib=yes -fi -test "${with_zlib}" = "yes" -a "${have_zlib}" != "yes" && as_fn_error $? "zlib required but not found. Try installing 'zlib1g-dev' or 'zlib-devel'." "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if zlib should be used" >&5 -$as_echo_n "checking if zlib should be used... " >&6; } -if test "${with_zlib}" != "no" -a "${have_zlib}" = "yes"; then - with_zlib="yes" - -$as_echo "#define NETDATA_WITH_ZLIB 1" >>confdefs.h - - OPTIONAL_ZLIB_CLFAGS="${ZLIB_CFLAGS}" - OPTIONAL_ZLIB_LIBS="${ZLIB_LIBS}" -else - with_zlib="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_zlib}" >&5 -$as_echo "${with_zlib}" >&6; } - - -# ----------------------------------------------------------------------------- -# libuuid - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for UUID" >&5 -$as_echo_n "checking for UUID... " >&6; } - -if test -n "$UUID_CFLAGS"; then - pkg_cv_UUID_CFLAGS="$UUID_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"uuid\""; } >&5 - ($PKG_CONFIG --exists --print-errors "uuid") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_UUID_CFLAGS=`$PKG_CONFIG --cflags "uuid" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$UUID_LIBS"; then - pkg_cv_UUID_LIBS="$UUID_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"uuid\""; } >&5 - ($PKG_CONFIG --exists --print-errors "uuid") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_UUID_LIBS=`$PKG_CONFIG --libs "uuid" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - UUID_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "uuid" 2>&1` - else - UUID_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "uuid" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$UUID_PKG_ERRORS" >&5 - - as_fn_error $? "libuuid required but not found. Try installing 'uuid-dev' or 'libuuid-devel'." "$LINENO" 5 - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - as_fn_error $? "libuuid required but not found. Try installing 'uuid-dev' or 'libuuid-devel'." "$LINENO" 5 - -else - UUID_CFLAGS=$pkg_cv_UUID_CFLAGS - UUID_LIBS=$pkg_cv_UUID_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - have_uuid=yes -fi - -$as_echo "#define NETDATA_WITH_UUID 1" >>confdefs.h - -OPTIONAL_UUID_CLFAGS="${UUID_CFLAGS}" -OPTIONAL_UUID_LIBS="${UUID_LIBS}" - - -# ----------------------------------------------------------------------------- -# compiler options - - -case $host_cpu in #( - i?86) : - SSE_CANDIDATE="yes" - ;; #( - *) : - ;; -esac - -if test "${SSE_CANDIDATE}" = "yes" -a "${enable_x86_sse}" = "yes"; then - opt="-msse2 -mfpmath=sse" - as_CACHEVAR=`$as_echo "ax_cv_check_cflags__${opt}" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts ${opt}" >&5 -$as_echo_n "checking whether C compiler accepts ${opt}... " >&6; } -if eval \${$as_CACHEVAR+:} false; then : - $as_echo_n "(cached) " >&6 -else - - ax_check_save_flags=$CFLAGS - CFLAGS="$CFLAGS ${opt}" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$as_CACHEVAR=yes" -else - eval "$as_CACHEVAR=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - CFLAGS=$ax_check_save_flags -fi -eval ac_res=\$$as_CACHEVAR - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then : - CFLAGS="${CFLAGS} ${opt}" -else - : -fi - -fi - -if test "${GCC}" = "yes"; then - -cat >>confdefs.h <<_ACEOF -#define likely(x) __builtin_expect(!!(x), 1) -_ACEOF - - -cat >>confdefs.h <<_ACEOF -#define unlikely(x) __builtin_expect(!!(x), 0) -_ACEOF - -else - -cat >>confdefs.h <<_ACEOF -#define likely(x) (x) -_ACEOF - - -cat >>confdefs.h <<_ACEOF -#define unlikely(x) (x) -_ACEOF - -fi - -if test "${enable_pedantic}" = "yes"; then - enable_strict="yes" - CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long" -fi - - -# ----------------------------------------------------------------------------- -# memory allocation library - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for memory allocator" >&5 -$as_echo_n "checking for memory allocator... " >&6; } - - -# Check whether --with-jemalloc-prefix was given. -if test "${with_jemalloc_prefix+set}" = set; then : - withval=$with_jemalloc_prefix; - jemalloc_prefix="$withval" - -else - - if test "`uname -s`" = "Darwin"; then - jemalloc_prefix="je_" - else - jemalloc_prefix="" - fi - - -fi - - -cat >>confdefs.h <<_ACEOF -#define prefix_jemalloc ${jemalloc_prefix} -_ACEOF - - -enable_jemalloc=no - -# Check whether --with-jemalloc was given. -if test "${with_jemalloc+set}" = set; then : - withval=$with_jemalloc; - if test "$withval" != "no"; then - if test "x${enable_tcmalloc}" = "xyes"; then - as_fn_error $? "Cannot compile with both jemalloc and tcmalloc" "$LINENO" 5 - fi - enable_jemalloc=yes - jemalloc_base_dir="$withval" - case "$withval" in - yes) - jemalloc_base_dir="/usr" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes standard directories" >&5 -$as_echo_n "checking checking for jemalloc includes standard directories... " >&6; } - ;; - *":"*) - jemalloc_include="`echo $withval |sed -e 's/:.*$//'`" - jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags" >&5 -$as_echo_n "checking checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags... " >&6; } - ;; - *) - jemalloc_include="$withval/include" - jemalloc_ldflags="$withval/lib" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes in $withval" >&5 -$as_echo_n "checking checking for jemalloc includes in $withval... " >&6; } - ;; - esac - fi - -fi - - -has_jemalloc=0 -if test "$enable_jemalloc" != "no"; then - jemalloc_have_headers=0 - jemalloc_have_libs=0 - if test "$jemalloc_base_dir" != "/usr"; then - CFLAGS="${CFLAGS} -I${jemalloc_include}" - LDFLAGS="${LDFLAGS} -L${jemalloc_ldflags}" - LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -R${jemalloc_ldflags}" - fi - func="${jemalloc_prefix}malloc_stats_print" - as_ac_Lib=`$as_echo "ac_cv_lib_jemalloc_${func}" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${func} in -ljemalloc" >&5 -$as_echo_n "checking for ${func} in -ljemalloc... 
" >&6; } -if eval \${$as_ac_Lib+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ljemalloc $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char ${func} (); -int -main () -{ -return ${func} (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$as_ac_Lib=yes" -else - eval "$as_ac_Lib=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -eval ac_res=\$$as_ac_Lib - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : - jemalloc_have_libs=1 -fi - - if test "$jemalloc_have_libs" != "0"; then - for ac_header in jemalloc/jemalloc.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "jemalloc/jemalloc.h" "ac_cv_header_jemalloc_jemalloc_h" "$ac_includes_default" -if test "x$ac_cv_header_jemalloc_jemalloc_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_JEMALLOC_JEMALLOC_H 1 -_ACEOF - jemalloc_have_headers=1 -fi - -done - - fi - if test "$jemalloc_have_headers" != "0"; then - has_jemalloc=1 - LIBS="${LIBS} -ljemalloc" - -$as_echo "#define has_jemalloc 1" >>confdefs.h - - else - as_fn_error $? "Couldn't find a jemalloc installation" "$LINENO" 5 - fi -fi - - -if test "$has_jemalloc" = "1"; then - -$as_echo "#define ENABLE_JEMALLOC 1" >>confdefs.h - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc" >&5 -$as_echo "jemalloc" >&6; } -else - - -# Check whether --with-tcmalloc-lib was given. -if test "${with_tcmalloc_lib+set}" = set; then : - withval=$with_tcmalloc_lib; - with_tcmalloc_lib="$withval" - -else - - with_tcmalloc_lib="tcmalloc" - - -fi - - -has_tcmalloc=0 - -# Check whether --with-tcmalloc was given. -if test "${with_tcmalloc+set}" = set; then : - withval=$with_tcmalloc; - if test "$withval" != "no"; then - if test "x${enable_jemalloc}" = "xyes"; then - as_fn_error $? "Cannot compile with both tcmalloc and jemalloc" "$LINENO" 5 - fi - tcmalloc_have_lib=0 - if test "x$withval" != "xyes" && test "x$withval" != "x"; then - tcmalloc_ldflags="$withval/lib" - LDFLAGS="${LDFLAGS} -L${tcmalloc_ldflags}" - LIBTOOL_LINK_FLAGS="${LIBTOOL_LINK_FLAGS} -rpath ${tcmalloc_ldflags}" - fi - as_ac_Lib=`$as_echo "ac_cv_lib_${with_tcmalloc_lib}''_tc_cfree" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tc_cfree in -l${with_tcmalloc_lib}" >&5 -$as_echo_n "checking for tc_cfree in -l${with_tcmalloc_lib}... " >&6; } -if eval \${$as_ac_Lib+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-l${with_tcmalloc_lib} $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char tc_cfree (); -int -main () -{ -return tc_cfree (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$as_ac_Lib=yes" -else - eval "$as_ac_Lib=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -eval ac_res=\$$as_ac_Lib - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : - tcmalloc_have_lib=1 -fi - - if test "$tcmalloc_have_lib" != "0"; then - LIBS="${LIBS} -l${with_tcmalloc_lib}" - has_tcmalloc=1 - -$as_echo "#define has_tcmalloc 1" >>confdefs.h - - else - as_fn_error $? "Couldn't find a tcmalloc installation" "$LINENO" 5 - fi - fi - -fi - - - - if test "$has_tcmalloc" = "1"; then - -$as_echo "#define ENABLE_TCMALLOC 1" >>confdefs.h - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: tcmalloc" >&5 -$as_echo "tcmalloc" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: system" >&5 -$as_echo "system" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mallopt" >&5 -$as_echo_n "checking for mallopt... " >&6; } -if ${ac_cv_c_mallopt+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc.h> - int main(int argc, char **argv) { - mallopt(M_ARENA_MAX, 1); - } - -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_c_mallopt=yes -else - ac_cv_c_mallopt=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_mallopt" >&5 -$as_echo "$ac_cv_c_mallopt" >&6; } -if test $ac_cv_c_mallopt = yes; then - -$as_echo "#define HAVE_C_MALLOPT 1" >>confdefs.h - -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mallinfo" >&5 -$as_echo_n "checking for mallinfo... " >&6; } -if ${ac_cv_c_mallinfo+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc.h> -int -main () -{ - - struct mallinfo mi = mallinfo(); - /* make sure that the fields exist */ - mi.uordblks = 0; - mi.hblkhd = 0; - mi.arena = 0; - - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_c_mallinfo=yes -else - ac_cv_c_mallinfo=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_mallinfo" >&5 -$as_echo "$ac_cv_c_mallinfo" >&6; } -if test $ac_cv_c_mallinfo = yes; then - -$as_echo "#define HAVE_C_MALLINFO 1" >>confdefs.h - -fi - - fi -fi - - -# ----------------------------------------------------------------------------- -# libcap - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCAP" >&5 -$as_echo_n "checking for LIBCAP... " >&6; } - -if test -n "$LIBCAP_CFLAGS"; then - pkg_cv_LIBCAP_CFLAGS="$LIBCAP_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcap\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libcap") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_LIBCAP_CFLAGS=`$PKG_CONFIG --cflags "libcap" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$LIBCAP_LIBS"; then - pkg_cv_LIBCAP_LIBS="$LIBCAP_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libcap\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libcap") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_LIBCAP_LIBS=`$PKG_CONFIG --libs "libcap" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - LIBCAP_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libcap" 2>&1` - else - LIBCAP_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libcap" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$LIBCAP_PKG_ERRORS" >&5 - - have_libcap=no - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - have_libcap=no - -else - LIBCAP_CFLAGS=$pkg_cv_LIBCAP_CFLAGS - LIBCAP_LIBS=$pkg_cv_LIBCAP_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for cap_get_proc, cap_set_proc in -lcap" >&5 -$as_echo_n "checking for cap_get_proc, cap_set_proc in -lcap... " >&6; } -if ${ac_cv_lib_cap_cap_get_proc__cap_set_proc+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lcap $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char cap_get_proc, cap_set_proc (); -int -main () -{ -return cap_get_proc, cap_set_proc (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_cap_cap_get_proc__cap_set_proc=yes -else - ac_cv_lib_cap_cap_get_proc__cap_set_proc=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cap_cap_get_proc__cap_set_proc" >&5 -$as_echo "$ac_cv_lib_cap_cap_get_proc__cap_set_proc" >&6; } -if test "x$ac_cv_lib_cap_cap_get_proc__cap_set_proc" = xyes; then : - ac_fn_c_check_header_mongrel "$LINENO" "sys/capability.h" "ac_cv_header_sys_capability_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_capability_h" = xyes; then : - have_libcap=yes -else - have_libcap=no - -fi - - -else - have_libcap=no - -fi - -fi -test "${with_libcap}" = "yes" -a "${have_libcap}" != "yes" && as_fn_error $? "libcap required but not found." "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if libcap should be used" >&5 -$as_echo_n "checking if libcap should be used... 
" >&6; } -if test "${with_libcap}" != "no" -a "${have_libcap}" = "yes"; then - with_libcap="yes" - -$as_echo "#define HAVE_CAPABILITY 1" >>confdefs.h - - OPTIONAL_LIBCAP_CLFAGS="${LIBCAP_CFLAGS}" - OPTIONAL_LIBCAP_LIBS="${LIBCAP_LIBS}" -else - with_libcap="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libcap}" >&5 -$as_echo "${with_libcap}" >&6; } - if test "${with_libcap}" = "yes"; then - ENABLE_CAPABILITY_TRUE= - ENABLE_CAPABILITY_FALSE='#' -else - ENABLE_CAPABILITY_TRUE='#' - ENABLE_CAPABILITY_FALSE= -fi - - - -# ----------------------------------------------------------------------------- -# apps.plugin - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if apps.plugin should be enabled" >&5 -$as_echo_n "checking if apps.plugin should be enabled... " >&6; } -if test "${build_target}" != "macos"; then - enable_plugin_apps="yes" -else - enable_plugin_apps="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_apps}" >&5 -$as_echo "${enable_plugin_apps}" >&6; } - if test "${enable_plugin_apps}" = "yes"; then - ENABLE_PLUGIN_APPS_TRUE= - ENABLE_PLUGIN_APPS_FALSE='#' -else - ENABLE_PLUGIN_APPS_TRUE='#' - ENABLE_PLUGIN_APPS_FALSE= -fi - - - -# ----------------------------------------------------------------------------- -# freeipmi.plugin - libipmimonitoring - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for IPMIMONITORING" >&5 -$as_echo_n "checking for IPMIMONITORING... " >&6; } - -if test -n "$IPMIMONITORING_CFLAGS"; then - pkg_cv_IPMIMONITORING_CFLAGS="$IPMIMONITORING_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libipmimonitoring\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libipmimonitoring") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_IPMIMONITORING_CFLAGS=`$PKG_CONFIG --cflags "libipmimonitoring" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$IPMIMONITORING_LIBS"; then - pkg_cv_IPMIMONITORING_LIBS="$IPMIMONITORING_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libipmimonitoring\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libipmimonitoring") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_IPMIMONITORING_LIBS=`$PKG_CONFIG --libs "libipmimonitoring" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - IPMIMONITORING_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libipmimonitoring" 2>&1` - else - IPMIMONITORING_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libipmimonitoring" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$IPMIMONITORING_PKG_ERRORS" >&5 - - have_ipmimonitoring=no - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - have_ipmimonitoring=no - -else - IPMIMONITORING_CFLAGS=$pkg_cv_IPMIMONITORING_CFLAGS - IPMIMONITORING_LIBS=$pkg_cv_IPMIMONITORING_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for - ipmi_monitoring_sensor_readings_by_record_id, - ipmi_monitoring_sensor_readings_by_sensor_type, - ipmi_monitoring_sensor_read_sensor_number, - ipmi_monitoring_sensor_read_sensor_name, - ipmi_monitoring_sensor_read_sensor_state, - ipmi_monitoring_sensor_read_sensor_units, - ipmi_monitoring_sensor_iterator_next, - ipmi_monitoring_ctx_sensor_config_file, - ipmi_monitoring_ctx_sdr_cache_directory, - ipmi_monitoring_ctx_errormsg, - ipmi_monitoring_ctx_create - in -lipmimonitoring" >&5 -$as_echo_n "checking for - ipmi_monitoring_sensor_readings_by_record_id, - ipmi_monitoring_sensor_readings_by_sensor_type, - ipmi_monitoring_sensor_read_sensor_number, - ipmi_monitoring_sensor_read_sensor_name, - ipmi_monitoring_sensor_read_sensor_state, - ipmi_monitoring_sensor_read_sensor_units, - ipmi_monitoring_sensor_iterator_next, - ipmi_monitoring_ctx_sensor_config_file, - ipmi_monitoring_ctx_sdr_cache_directory, - ipmi_monitoring_ctx_errormsg, - ipmi_monitoring_ctx_create - in -lipmimonitoring... " >&6; } -if ${ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lipmimonitoring $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char - ipmi_monitoring_sensor_readings_by_record_id, - ipmi_monitoring_sensor_readings_by_sensor_type, - ipmi_monitoring_sensor_read_sensor_number, - ipmi_monitoring_sensor_read_sensor_name, - ipmi_monitoring_sensor_read_sensor_state, - ipmi_monitoring_sensor_read_sensor_units, - ipmi_monitoring_sensor_iterator_next, - ipmi_monitoring_ctx_sensor_config_file, - ipmi_monitoring_ctx_sdr_cache_directory, - ipmi_monitoring_ctx_errormsg, - ipmi_monitoring_ctx_create - (); -int -main () -{ -return - ipmi_monitoring_sensor_readings_by_record_id, - ipmi_monitoring_sensor_readings_by_sensor_type, - ipmi_monitoring_sensor_read_sensor_number, - ipmi_monitoring_sensor_read_sensor_name, - ipmi_monitoring_sensor_read_sensor_state, - ipmi_monitoring_sensor_read_sensor_units, - ipmi_monitoring_sensor_iterator_next, - ipmi_monitoring_ctx_sensor_config_file, - ipmi_monitoring_ctx_sdr_cache_directory, - ipmi_monitoring_ctx_errormsg, - ipmi_monitoring_ctx_create - (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=yes -else - ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&5 -$as_echo "$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" >&6; } -if test 
"x$ac_cv_lib_ipmimonitoring__________ipmi_monitoring_sensor_readings_by_record_id__________ipmi_monitoring_sensor_readings_by_sensor_type__________ipmi_monitoring_sensor_read_sensor_number__________ipmi_monitoring_sensor_read_sensor_name__________ipmi_monitoring_sensor_read_sensor_state__________ipmi_monitoring_sensor_read_sensor_units__________ipmi_monitoring_sensor_iterator_next__________ipmi_monitoring_ctx_sensor_config_file__________ipmi_monitoring_ctx_sdr_cache_directory__________ipmi_monitoring_ctx_errormsg__________ipmi_monitoring_ctx_create_____" = xyes; then : - ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring.h" "ac_cv_header_ipmi_monitoring_h" "$ac_includes_default" -if test "x$ac_cv_header_ipmi_monitoring_h" = xyes; then : - ac_fn_c_check_header_mongrel "$LINENO" "ipmi_monitoring_bitmasks.h" "ac_cv_header_ipmi_monitoring_bitmasks_h" "$ac_includes_default" -if test "x$ac_cv_header_ipmi_monitoring_bitmasks_h" = xyes; then : - have_ipmimonitoring=yes -else - have_ipmimonitoring=no - -fi - - -else - have_ipmimonitoring=no - -fi - - -else - have_ipmimonitoring=no - -fi - -fi -test "${enable_plugin_freeipmi}" = "yes" -a "${have_ipmimonitoring}" != "yes" && \ - as_fn_error $? "ipmimonitoring required but not found. Try installing 'libipmimonitoring-dev' or 'libipmimonitoring-devel'" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if freeipmi.plugin should be enabled" >&5 -$as_echo_n "checking if freeipmi.plugin should be enabled... " >&6; } -if test "${enable_plugin_freeipmi}" != "no" -a "${have_ipmimonitoring}" = "yes"; then - enable_plugin_freeipmi="yes" - -$as_echo "#define HAVE_FREEIPMI 1" >>confdefs.h - - OPTIONAL_IPMIMONITORING_CLFAGS="${IPMIMONITORING_CFLAGS}" - OPTIONAL_IPMIMONITORING_LIBS="${IPMIMONITORING_LIBS}" -else - enable_plugin_freeipmi="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_freeipmi}" >&5 -$as_echo "${enable_plugin_freeipmi}" >&6; } - if test "${enable_plugin_freeipmi}" = "yes"; then - ENABLE_PLUGIN_FREEIPMI_TRUE= - ENABLE_PLUGIN_FREEIPMI_FALSE='#' -else - ENABLE_PLUGIN_FREEIPMI_TRUE='#' - ENABLE_PLUGIN_FREEIPMI_FALSE= -fi - - - -# ----------------------------------------------------------------------------- -# nfacct.plugin - libmnl, libnetfilter_acct - - - - - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for NFACCT" >&5 -$as_echo_n "checking for NFACCT... " >&6; } - -if test -n "$NFACCT_CFLAGS"; then - pkg_cv_NFACCT_CFLAGS="$NFACCT_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libnetfilter_acct\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libnetfilter_acct") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_NFACCT_CFLAGS=`$PKG_CONFIG --cflags "libnetfilter_acct" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$NFACCT_LIBS"; then - pkg_cv_NFACCT_LIBS="$NFACCT_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libnetfilter_acct\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libnetfilter_acct") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_NFACCT_LIBS=`$PKG_CONFIG --libs "libnetfilter_acct" 2>/dev/null` - test "x$?" 
!= "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - NFACCT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libnetfilter_acct" 2>&1` - else - NFACCT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libnetfilter_acct" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$NFACCT_PKG_ERRORS" >&5 - - have_libnetfilter_acct=no - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - have_libnetfilter_acct=no - -else - NFACCT_CFLAGS=$pkg_cv_NFACCT_CFLAGS - NFACCT_LIBS=$pkg_cv_NFACCT_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - have_libnetfilter_acct=yes -fi - - -pkg_failed=no -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBMNL" >&5 -$as_echo_n "checking for LIBMNL... " >&6; } - -if test -n "$LIBMNL_CFLAGS"; then - pkg_cv_LIBMNL_CFLAGS="$LIBMNL_CFLAGS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmnl\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libmnl") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_LIBMNL_CFLAGS=`$PKG_CONFIG --cflags "libmnl" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi -if test -n "$LIBMNL_LIBS"; then - pkg_cv_LIBMNL_LIBS="$LIBMNL_LIBS" - elif test -n "$PKG_CONFIG"; then - if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmnl\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libmnl") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - pkg_cv_LIBMNL_LIBS=`$PKG_CONFIG --libs "libmnl" 2>/dev/null` - test "x$?" != "x0" && pkg_failed=yes -else - pkg_failed=yes -fi - else - pkg_failed=untried -fi - - - -if test $pkg_failed = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi - if test $_pkg_short_errors_supported = yes; then - LIBMNL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmnl" 2>&1` - else - LIBMNL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmnl" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$LIBMNL_PKG_ERRORS" >&5 - - have_libmnl=no - -elif test $pkg_failed = untried; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - have_libmnl=no - -else - LIBMNL_CFLAGS=$pkg_cv_LIBMNL_CFLAGS - LIBMNL_LIBS=$pkg_cv_LIBMNL_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - have_libmnl=yes -fi - -test "${enable_plugin_nfacct}" = "yes" -a "${have_libnetfilter_acct}" != "yes" && \ - as_fn_error $? "netfilter_acct required but not found" "$LINENO" 5 - -test "${enable_plugin_nfacct}" = "yes" -a "${have_libmnl}" != "yes" && \ - as_fn_error $? "libmnl required but not found. 
Try installing 'libmnl-dev' or 'libmnl-devel'" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if nfacct.plugin should be enabled" >&5 -$as_echo_n "checking if nfacct.plugin should be enabled... " >&6; } -if test "${enable_plugin_nfacct}" != "no" -a "${have_libnetfilter_acct}" = "yes" -a "${have_libmnl}" = "yes"; then - enable_plugin_nfacct="yes" - -$as_echo "#define HAVE_LIBMNL 1" >>confdefs.h - - -$as_echo "#define HAVE_LIBNETFILTER_ACCT 1" >>confdefs.h - - -$as_echo "#define INTERNAL_PLUGIN_NFACCT 1" >>confdefs.h - - OPTIONAL_NFACCT_CLFAGS="${NFACCT_CFLAGS} ${LIBMNL_CFLAGS}" - OPTIONAL_NFACCT_LIBS="${NFACCT_LIBS} ${LIBMNL_LIBS}" -else - enable_plugin_nfacct="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_plugin_nfacct}" >&5 -$as_echo "${enable_plugin_nfacct}" >&6; } - if test "${enable_plugin_nfacct}" = "yes"; then - ENABLE_PLUGIN_NFACCT_TRUE= - ENABLE_PLUGIN_NFACCT_FALSE='#' -else - ENABLE_PLUGIN_NFACCT_TRUE='#' - ENABLE_PLUGIN_NFACCT_FALSE= -fi - - - -# ----------------------------------------------------------------------------- -# check for setns() - cgroup-network - -ac_fn_c_check_func "$LINENO" "setns" "ac_cv_func_setns" -if test "x$ac_cv_func_setns" = xyes; then : - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if cgroup-network can be enabled" >&5 -$as_echo_n "checking if cgroup-network can be enabled... " >&6; } -if test "$ac_cv_func_setns" = "yes" ; then - have_setns="yes" - -$as_echo "#define HAVE_SETNS 1" >>confdefs.h - -else - have_setns="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${have_setns}" >&5 -$as_echo "${have_setns}" >&6; } - if test "${have_setns}" = "yes"; then - ENABLE_PLUGIN_CGROUP_NETWORK_TRUE= - ENABLE_PLUGIN_CGROUP_NETWORK_FALSE='#' -else - ENABLE_PLUGIN_CGROUP_NETWORK_TRUE='#' - ENABLE_PLUGIN_CGROUP_NETWORK_FALSE= -fi - - - -# ----------------------------------------------------------------------------- -# Link-Time-Optimization - -if test "${enable_lto}" != "no"; then - opt="-flto" - as_CACHEVAR=`$as_echo "ax_cv_check_cflags__${opt}" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts ${opt}" >&5 -$as_echo_n "checking whether C compiler accepts ${opt}... " >&6; } -if eval \${$as_CACHEVAR+:} false; then : - $as_echo_n "(cached) " >&6 -else - - ax_check_save_flags=$CFLAGS - CFLAGS="$CFLAGS ${opt}" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$as_CACHEVAR=yes" -else - eval "$as_CACHEVAR=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - CFLAGS=$ax_check_save_flags -fi -eval ac_res=\$$as_CACHEVAR - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then : - have_lto=yes -else - have_lto=no -fi - -fi -if test "${have_lto}" = "yes"; then - oCFLAGS="${CFLAGS}" - CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS}" - ac_cv_c_lto_cross_compile="${enable_lto}" - test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if -flto builds executables" >&5 -$as_echo_n "checking if -flto builds executables... 
" >&6; } -if ${ac_cv_c_lto+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - ac_cv_c_lto=${ac_cv_c_lto_cross_compile} -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - int main(int argc, char **argv) { - return 0; - } - -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_c_lto=yes -else - ac_cv_c_lto=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_lto" >&5 -$as_echo "$ac_cv_c_lto" >&6; } -if test "${ac_cv_c_lto}" = "yes"; then - -$as_echo "#define HAVE_LTO 1" >>confdefs.h - -fi - - CFLAGS="${oCFLAGS}" - test "${ac_cv_c_lto}" != "yes" && have_lto="no" -fi -test "${enable_lto}" = "yes" -a "${have_lto}" != "yes" && \ - as_fn_error $? "LTO is required but is not available." "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if LTO should be enabled" >&5 -$as_echo_n "checking if LTO should be enabled... " >&6; } -if test "${enable_lto}" != "no" -a "${have_lto}" = "yes"; then - enable_lto="yes" - CFLAGS="${CFLAGS} -flto" -else - enable_lto="no" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_lto}" >&5 -$as_echo "${enable_lto}" >&6; } - - -# ----------------------------------------------------------------------------- - - -cat >>confdefs.h <<_ACEOF -#define NETDATA_USER "${with_user}" -_ACEOF - - -varlibdir="${localstatedir}/lib/netdata" -registrydir="${localstatedir}/lib/netdata/registry" -cachedir="${localstatedir}/cache/netdata" -chartsdir="${libexecdir}/netdata/charts.d" -nodedir="${libexecdir}/netdata/node.d" -pythondir="${libexecdir}/netdata/python.d" -configdir="${sysconfdir}/netdata" -libconfigdir="${libdir}/netdata/conf.d" -logdir="${localstatedir}/log/netdata" -pluginsdir="${libexecdir}/netdata/plugins.d" - - - - - - - - - - - - - - -CPPFLAGS="\ - -DTARGET_OS=${build_target_id} \ - -DVARLIB_DIR=\"\\\"${varlibdir}\\\"\" \ - -DCACHE_DIR=\"\\\"${cachedir}\\\"\" \ - -DCONFIG_DIR=\"\\\"${configdir}\\\"\" \ - -DLIBCONFIG_DIR=\"\\\"${libconfigdir}\\\"\" \ - -DLOG_DIR=\"\\\"${logdir}\\\"\" \ - -DPLUGINS_DIR=\"\\\"${pluginsdir}\\\"\" \ - -DRUN_DIR=\"\\\"${localstatedir}/run/netdata\\\"\" \ - -DWEB_DIR=\"\\\"${webdir}\\\"\" \ -" - - - - - - - - - - - - - - -ac_config_files="$ac_config_files Makefile netdata.spec backends/graphite/Makefile backends/json/Makefile backends/Makefile backends/opentsdb/Makefile backends/prometheus/Makefile collectors/Makefile collectors/apps.plugin/Makefile collectors/cgroups.plugin/Makefile collectors/charts.d.plugin/Makefile collectors/checks.plugin/Makefile collectors/diskspace.plugin/Makefile collectors/fping.plugin/Makefile collectors/freebsd.plugin/Makefile collectors/freeipmi.plugin/Makefile collectors/idlejitter.plugin/Makefile collectors/macos.plugin/Makefile collectors/nfacct.plugin/Makefile collectors/node.d.plugin/Makefile collectors/plugins.d/Makefile collectors/proc.plugin/Makefile collectors/python.d.plugin/Makefile collectors/statsd.plugin/Makefile collectors/tc.plugin/Makefile contrib/Makefile daemon/Makefile database/Makefile diagrams/Makefile health/Makefile health/notifications/Makefile libnetdata/Makefile libnetdata/adaptive_resortable_list/Makefile libnetdata/avl/Makefile libnetdata/buffer/Makefile libnetdata/clocks/Makefile libnetdata/config/Makefile libnetdata/dictionary/Makefile libnetdata/eval/Makefile libnetdata/locks/Makefile libnetdata/log/Makefile libnetdata/popen/Makefile 
libnetdata/procfile/Makefile libnetdata/simple_pattern/Makefile libnetdata/socket/Makefile libnetdata/statistical/Makefile libnetdata/storage_number/Makefile libnetdata/threads/Makefile libnetdata/url/Makefile makeself/Makefile registry/Makefile streaming/Makefile system/Makefile tests/Makefile web/Makefile web/api/Makefile web/api/badges/Makefile web/api/exporters/Makefile web/api/exporters/shell/Makefile web/api/exporters/prometheus/Makefile web/api/formatters/Makefile web/api/formatters/csv/Makefile web/api/formatters/json/Makefile web/api/formatters/ssv/Makefile web/api/formatters/value/Makefile web/api/queries/Makefile web/api/queries/average/Makefile web/api/queries/des/Makefile web/api/queries/incremental_sum/Makefile web/api/queries/max/Makefile web/api/queries/median/Makefile web/api/queries/min/Makefile web/api/queries/ses/Makefile web/api/queries/stddev/Makefile web/api/queries/sum/Makefile web/gui/Makefile web/server/Makefile web/server/single/Makefile web/server/multi/Makefile web/server/static/Makefile" - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! 
-f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - -if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then - as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 -$as_echo_n "checking that generated files are newer than configure... " >&6; } - if test -n "$am_sleep_pid"; then - # Hide warnings about reused PIDs. - wait $am_sleep_pid 2>/dev/null - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 -$as_echo "done" >&6; } - if test -n "$EXEEXT"; then - am__EXEEXT_TRUE= - am__EXEEXT_FALSE='#' -else - am__EXEEXT_TRUE='#' - am__EXEEXT_FALSE= -fi - -if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then - as_fn_error $? "conditional \"AMDEP\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then - as_fn_error $? "conditional \"am__fastdepCC\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${FREEBSD_TRUE}" && test -z "${FREEBSD_FALSE}"; then - as_fn_error $? "conditional \"FREEBSD\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${MACOS_TRUE}" && test -z "${MACOS_FALSE}"; then - as_fn_error $? "conditional \"MACOS\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${LINUX_TRUE}" && test -z "${LINUX_FALSE}"; then - as_fn_error $? "conditional \"LINUX\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${ENABLE_CAPABILITY_TRUE}" && test -z "${ENABLE_CAPABILITY_FALSE}"; then - as_fn_error $? "conditional \"ENABLE_CAPABILITY\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${ENABLE_PLUGIN_APPS_TRUE}" && test -z "${ENABLE_PLUGIN_APPS_FALSE}"; then - as_fn_error $? "conditional \"ENABLE_PLUGIN_APPS\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${ENABLE_PLUGIN_FREEIPMI_TRUE}" && test -z "${ENABLE_PLUGIN_FREEIPMI_FALSE}"; then - as_fn_error $? 
"conditional \"ENABLE_PLUGIN_FREEIPMI\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${ENABLE_PLUGIN_NFACCT_TRUE}" && test -z "${ENABLE_PLUGIN_NFACCT_FALSE}"; then - as_fn_error $? "conditional \"ENABLE_PLUGIN_NFACCT\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_TRUE}" && test -z "${ENABLE_PLUGIN_CGROUP_NETWORK_FALSE}"; then - as_fn_error $? "conditional \"ENABLE_PLUGIN_CGROUP_NETWORK\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. 
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? 
-eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. 
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by netdata $as_me 1.11.1_rolling, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" -config_commands="$ac_config_commands" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... - - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Configuration commands: -$config_commands - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -netdata config.status 1.11.1_rolling -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -INSTALL='$INSTALL' -MKDIR_P='$MKDIR_P' -AWK='$AWK' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. 
- -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# -# INIT-COMMANDS -# -AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. 
-for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; - "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; - "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; - "netdata.spec") CONFIG_FILES="$CONFIG_FILES netdata.spec" ;; - "backends/graphite/Makefile") CONFIG_FILES="$CONFIG_FILES backends/graphite/Makefile" ;; - "backends/json/Makefile") CONFIG_FILES="$CONFIG_FILES backends/json/Makefile" ;; - "backends/Makefile") CONFIG_FILES="$CONFIG_FILES backends/Makefile" ;; - "backends/opentsdb/Makefile") CONFIG_FILES="$CONFIG_FILES backends/opentsdb/Makefile" ;; - "backends/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES backends/prometheus/Makefile" ;; - "collectors/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/Makefile" ;; - "collectors/apps.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/apps.plugin/Makefile" ;; - "collectors/cgroups.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/cgroups.plugin/Makefile" ;; - "collectors/charts.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/charts.d.plugin/Makefile" ;; - "collectors/checks.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/checks.plugin/Makefile" ;; - "collectors/diskspace.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/diskspace.plugin/Makefile" ;; - "collectors/fping.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/fping.plugin/Makefile" ;; - "collectors/freebsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freebsd.plugin/Makefile" ;; - "collectors/freeipmi.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/freeipmi.plugin/Makefile" ;; - "collectors/idlejitter.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/idlejitter.plugin/Makefile" ;; - "collectors/macos.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/macos.plugin/Makefile" ;; - "collectors/nfacct.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/nfacct.plugin/Makefile" ;; - "collectors/node.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/node.d.plugin/Makefile" ;; - "collectors/plugins.d/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/plugins.d/Makefile" ;; - "collectors/proc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/proc.plugin/Makefile" ;; - "collectors/python.d.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/python.d.plugin/Makefile" ;; - "collectors/statsd.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/statsd.plugin/Makefile" ;; - "collectors/tc.plugin/Makefile") CONFIG_FILES="$CONFIG_FILES collectors/tc.plugin/Makefile" ;; - "contrib/Makefile") CONFIG_FILES="$CONFIG_FILES contrib/Makefile" ;; - "daemon/Makefile") CONFIG_FILES="$CONFIG_FILES daemon/Makefile" ;; - "database/Makefile") CONFIG_FILES="$CONFIG_FILES database/Makefile" ;; - "diagrams/Makefile") CONFIG_FILES="$CONFIG_FILES diagrams/Makefile" ;; - "health/Makefile") CONFIG_FILES="$CONFIG_FILES health/Makefile" ;; - "health/notifications/Makefile") CONFIG_FILES="$CONFIG_FILES health/notifications/Makefile" ;; - "libnetdata/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/Makefile" ;; - "libnetdata/adaptive_resortable_list/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/adaptive_resortable_list/Makefile" ;; - "libnetdata/avl/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/avl/Makefile" ;; - "libnetdata/buffer/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/buffer/Makefile" ;; - "libnetdata/clocks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/clocks/Makefile" ;; - 
"libnetdata/config/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/config/Makefile" ;; - "libnetdata/dictionary/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/dictionary/Makefile" ;; - "libnetdata/eval/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/eval/Makefile" ;; - "libnetdata/locks/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/locks/Makefile" ;; - "libnetdata/log/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/log/Makefile" ;; - "libnetdata/popen/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/popen/Makefile" ;; - "libnetdata/procfile/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/procfile/Makefile" ;; - "libnetdata/simple_pattern/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/simple_pattern/Makefile" ;; - "libnetdata/socket/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/socket/Makefile" ;; - "libnetdata/statistical/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/statistical/Makefile" ;; - "libnetdata/storage_number/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/storage_number/Makefile" ;; - "libnetdata/threads/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/threads/Makefile" ;; - "libnetdata/url/Makefile") CONFIG_FILES="$CONFIG_FILES libnetdata/url/Makefile" ;; - "makeself/Makefile") CONFIG_FILES="$CONFIG_FILES makeself/Makefile" ;; - "registry/Makefile") CONFIG_FILES="$CONFIG_FILES registry/Makefile" ;; - "streaming/Makefile") CONFIG_FILES="$CONFIG_FILES streaming/Makefile" ;; - "system/Makefile") CONFIG_FILES="$CONFIG_FILES system/Makefile" ;; - "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; - "web/Makefile") CONFIG_FILES="$CONFIG_FILES web/Makefile" ;; - "web/api/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/Makefile" ;; - "web/api/badges/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/badges/Makefile" ;; - "web/api/exporters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/Makefile" ;; - "web/api/exporters/shell/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/shell/Makefile" ;; - "web/api/exporters/prometheus/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/exporters/prometheus/Makefile" ;; - "web/api/formatters/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/Makefile" ;; - "web/api/formatters/csv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/csv/Makefile" ;; - "web/api/formatters/json/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/json/Makefile" ;; - "web/api/formatters/ssv/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/ssv/Makefile" ;; - "web/api/formatters/value/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/formatters/value/Makefile" ;; - "web/api/queries/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/Makefile" ;; - "web/api/queries/average/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/average/Makefile" ;; - "web/api/queries/des/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/des/Makefile" ;; - "web/api/queries/incremental_sum/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/incremental_sum/Makefile" ;; - "web/api/queries/max/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/max/Makefile" ;; - "web/api/queries/median/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/median/Makefile" ;; - "web/api/queries/min/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/min/Makefile" ;; - "web/api/queries/ses/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/ses/Makefile" ;; - "web/api/queries/stddev/Makefile") CONFIG_FILES="$CONFIG_FILES web/api/queries/stddev/Makefile" ;; - "web/api/queries/sum/Makefile") 
CONFIG_FILES="$CONFIG_FILES web/api/queries/sum/Makefile" ;; - "web/gui/Makefile") CONFIG_FILES="$CONFIG_FILES web/gui/Makefile" ;; - "web/server/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/Makefile" ;; - "web/server/single/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/single/Makefile" ;; - "web/server/multi/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/multi/Makefile" ;; - "web/server/static/Makefile") CONFIG_FILES="$CONFIG_FILES web/server/static/Makefile" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers - test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. 
- -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. 
- case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - - case $INSTALL in - [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; - *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; - esac - ac_MKDIR_P=$MKDIR_P - case $MKDIR_P in - [\\/$]* | ?:[\\/]* ) ;; - */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; - esac -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -s&@INSTALL@&$ac_INSTALL&;t t -s&@MKDIR_P@&$ac_MKDIR_P&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi -# Compute "$ac_file"'s index in $config_headers. -_am_arg="$ac_file" -_am_stamp_count=1 -for _am_header in $config_headers :; do - case $_am_header in - $_am_arg | $_am_arg:* ) - break ;; - * ) - _am_stamp_count=`expr $_am_stamp_count + 1` ;; - esac -done -echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || -$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$_am_arg" : 'X\(//\)[^/]' \| \ - X"$_am_arg" : 'X\(//\)$' \| \ - X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$_am_arg" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'`/stamp-h$_am_stamp_count - ;; - - :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 -$as_echo "$as_me: executing $ac_file commands" >&6;} - ;; - esac - - - case $ac_file$ac_mode in - "depfiles":C) test x"$AMDEP_TRUE" != x"" || { - # Older Autoconf quotes --file arguments for eval, but not when files - # are listed without --file. Let's play safe and only enable the eval - # if we detect the quoting. 
- case $CONFIG_FILES in - *\'*) eval set x "$CONFIG_FILES" ;; - *) set x $CONFIG_FILES ;; - esac - shift - for mf - do - # Strip MF so we end up with the name of the file. - mf=`echo "$mf" | sed -e 's/:.*$//'` - # Check whether this is an Automake generated Makefile or not. - # We used to match only the files named 'Makefile.in', but - # some people rename them; so instead we look at the file content. - # Grep'ing the first line is not enough: some people post-process - # each Makefile.in and add a new line on top of each file to say so. - # Grep'ing the whole file is not good either: AIX grep has a line - # limit of 2048, but all sed's we know have understand at least 4000. - if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then - dirpart=`$as_dirname -- "$mf" || -$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$mf" : 'X\(//\)[^/]' \| \ - X"$mf" : 'X\(//\)$' \| \ - X"$mf" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$mf" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - else - continue - fi - # Extract the definition of DEPDIR, am__include, and am__quote - # from the Makefile without running 'make'. - DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` - test -z "$DEPDIR" && continue - am__include=`sed -n 's/^am__include = //p' < "$mf"` - test -z "$am__include" && continue - am__quote=`sed -n 's/^am__quote = //p' < "$mf"` - # Find all dependency output files, they are included files with - # $(DEPDIR) in their names. We invoke sed twice because it is the - # simplest approach to changing $(DEPDIR) to its actual value in the - # expansion. - for file in `sed -n " - s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ - sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do - # Make sure the directory exists. - test -f "$dirpart/$file" && continue - fdir=`$as_dirname -- "$file" || -$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$file" : 'X\(//\)[^/]' \| \ - X"$file" : 'X\(//\)$' \| \ - X"$file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir=$dirpart/$fdir; as_fn_mkdir_p - # echo "creating $dirpart/$file" - echo '# dummy' > "$dirpart/$file" - done - done -} - ;; - - esac -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? 
= 1, which
-  # would make configure fail if this is the last instruction.
-  $ac_cs_success || as_fn_exit 1
-fi
-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
-fi
-
-
-test "${with_math}" != "yes" && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You are building without math. math allows accurate calculations. It should be enabled." >&5
-$as_echo "$as_me: WARNING: You are building without math. math allows accurate calculations. It should be enabled." >&2;} || :
-test "${with_zlib}" != "yes" && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled." >&5
-$as_echo "$as_me: WARNING: You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled." >&2;} || :
diff --git a/configure.ac b/configure.ac
index 06e3bf7af..6cea68836 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,31 +4,20 @@
 #
 AC_PREREQ(2.60)
 
-define([VERSION_MAJOR], [1])
-define([VERSION_MINOR], [11])
-define([VERSION_FIX], [0])
-define([VERSION_NUMBER], VERSION_MAJOR[.]VERSION_MINOR[.]VERSION_FIX)
-define([VERSION_SUFFIX], [_rolling])
-
-dnl Set to "1" for a first RPM release of a new version
-PACKAGE_RPM_RELEASE="0.0.$(echo VERSION_SUFFIX | sed s/^_//)"
-
 # We do not use m4_esyscmd_s to support older autoconf.
-define([VERSION_STRING], m4_esyscmd(git describe 2>/dev/null | sed 's/^v//' | tr -d '\n'))
-m4_ifval(VERSION_STRING, [], [define([VERSION_STRING], VERSION_NUMBER)])
+define([VERSION_STRING], m4_esyscmd([git describe --always 2>/dev/null | tr -d '\n']))
+define([VERSION_FROM_FILE], m4_esyscmd([cat packaging/version | tr -d '\n']))
+m4_ifval(VERSION_STRING, [], [define([VERSION_STRING], VERSION_FROM_FILE)])
 
-AC_INIT([netdata], VERSION_STRING[]VERSION_SUFFIX)
+AC_INIT([netdata], VERSION_STRING[])
 
 AM_MAINTAINER_MODE([disable])
 if test x"$USE_MAINTAINER_MODE" = xyes; then
 AC_MSG_NOTICE(***************** MAINTAINER MODE *****************)
-PACKAGE_BUILT_DATE=$(date '+%d %b %Y')
 fi
 
-PACKAGE_RPM_VERSION="VERSION_NUMBER"
+PACKAGE_RPM_VERSION="VERSION_STRING"
 AC_SUBST([PACKAGE_RPM_VERSION])
-AC_SUBST([PACKAGE_RPM_RELEASE])
-
 
 # -----------------------------------------------------------------------------
 # autoconf initialization
@@ -68,6 +57,13 @@ AC_ARG_ENABLE(
         ,
         [enable_plugin_freeipmi="detect"]
 )
+AC_ARG_ENABLE(
+        [plugin-cups],
+        [AS_HELP_STRING([--enable-plugin-cups], [enable cups plugin @<:@default autodetect@:>@])],
+        ,
+        [enable_plugin_cups="detect"]
+)
 AC_ARG_ENABLE(
         [pedantic],
         [AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
@@ -410,6 +406,65 @@ AC_MSG_RESULT([${enable_plugin_freeipmi}])
 AM_CONDITIONAL([ENABLE_PLUGIN_FREEIPMI], [test "${enable_plugin_freeipmi}" = "yes"])
 
 
+# -----------------------------------------------------------------------------
+# cups.plugin - libcups
+
+AC_CHECK_LIB([cups], [
+        cupsEncryption,
+        cupsFreeDests,
+        cupsFreeJobs,
+        cupsGetDests2,
+        cupsGetIntegerOption,
+        cupsGetJobs2,
+        cupsGetOption,
+        cupsServer,
+        httpClose,
+        httpConnect2,
+        ippPort
+],
+        [AC_CHECK_HEADER(
+                [cups/cups.h],
+                [have_cups=yes],
+                [have_cups=no]
+        )],
+        [have_cups=no]
+)
+
+test "${enable_plugin_cups}" = "yes" -a "${have_cups}" != "yes" && \
+        AC_MSG_ERROR([cups required but not found. Try installing 'cups'])
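# Illustrative sketch -- not part of the upstream change. The probe above boils
# down to locating cups-config and collecting its flags; the equivalent manual
# check, runnable by hand (paths and flag output vary per system):
CUPSCONFIG="$(command -v cups-config)" || { echo "no cups-config found; cups.plugin would stay disabled"; exit 1; }
CUPS_CFLAGS="$("${CUPSCONFIG}" --cflags)"        # e.g. -I/usr/include
CUPS_LIBS="$("${CUPSCONFIG}" --image --libs)"    # e.g. -lcupsimage -lcups
echo "cups.plugin would compile with: ${CUPS_CFLAGS}"
echo "cups.plugin would link with:    ${CUPS_LIBS}"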
+
+AC_ARG_WITH([cups-config],
+        [AS_HELP_STRING([--with-cups-config=path], [Specify path to cups-config executable.])],
+        [with_cups_config="$withval"],
+        [with_cups_config=system]
+)
+
+AS_IF([test "x$with_cups_config" != "xsystem"], [
+        CUPSCONFIG=$with_cups_config
+], [
+        AC_PATH_TOOL(CUPSCONFIG, [cups-config])
+        AS_IF([test -z "$CUPSCONFIG"], [
+                have_cups=no
+        ])
+])
+
+AC_MSG_CHECKING([if cups.plugin should be enabled])
+if test "${enable_plugin_cups}" != "no" -a "${have_cups}" = "yes"; then
+        enable_plugin_cups="yes"
+        AC_DEFINE([HAVE_CUPS], [1], [cups usability])
+
+        CUPS_CFLAGS="${CUPS_CFLAGS} `$CUPSCONFIG --cflags`"
+        CUPS_LIBS="${CUPS_LIBS} `$CUPSCONFIG --image --libs`"
+
+        OPTIONAL_CUPS_CFLAGS="${CUPS_CFLAGS}"
+        OPTIONAL_CUPS_LIBS="${CUPS_LIBS}"
+else
+        enable_plugin_cups="no"
+fi
+AC_MSG_RESULT([${enable_plugin_cups}])
+AM_CONDITIONAL([ENABLE_PLUGIN_CUPS], [test "${enable_plugin_cups}" = "yes"])
+
+
 # -----------------------------------------------------------------------------
 # nfacct.plugin - libmnl, libnetfilter_acct
 
@@ -474,7 +529,7 @@ if test "${enable_lto}" != "no"; then
 fi
 if test "${have_lto}" = "yes"; then
     oCFLAGS="${CFLAGS}"
-    CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS}"
+    CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS}"
     ac_cv_c_lto_cross_compile="${enable_lto}"
     test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
     AC_C_LTO
@@ -545,6 +600,9 @@ AC_SUBST([OPTIONAL_LIBCAP_CFLAGS])
 AC_SUBST([OPTIONAL_LIBCAP_LIBS])
 AC_SUBST([OPTIONAL_IPMIMONITORING_CFLAGS])
 AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS])
+AC_SUBST([OPTIONAL_CUPS_CFLAGS])
+AC_SUBST([OPTIONAL_CUPS_LIBS])
+
 
 AC_CONFIG_FILES([
         Makefile
@@ -563,6 +621,7 @@ AC_CONFIG_FILES([
         collectors/fping.plugin/Makefile
         collectors/freebsd.plugin/Makefile
         collectors/freeipmi.plugin/Makefile
+        collectors/cups.plugin/Makefile
         collectors/idlejitter.plugin/Makefile
         collectors/macos.plugin/Makefile
         collectors/nfacct.plugin/Makefile
@@ -572,7 +631,6 @@ AC_CONFIG_FILES([
         collectors/python.d.plugin/Makefile
         collectors/statsd.plugin/Makefile
         collectors/tc.plugin/Makefile
-        contrib/Makefile
         daemon/Makefile
         database/Makefile
         diagrams/Makefile
@@ -596,7 +654,6 @@ AC_CONFIG_FILES([
         libnetdata/storage_number/Makefile
         libnetdata/threads/Makefile
         libnetdata/url/Makefile
-        makeself/Makefile
         registry/Makefile
         streaming/Makefile
         system/Makefile
@@ -622,10 +679,9 @@ AC_CONFIG_FILES([
         web/api/queries/ses/Makefile
         web/api/queries/stddev/Makefile
         web/api/queries/sum/Makefile
+        web/api/health/Makefile
         web/gui/Makefile
         web/server/Makefile
-        web/server/single/Makefile
-        web/server/multi/Makefile
         web/server/static/Makefile
 ])
 AC_OUTPUT
diff --git a/contrib/Makefile.am b/contrib/Makefile.am
deleted file mode 100644
index 80d80d371..000000000
--- a/contrib/Makefile.am
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
-	README.md \
-	debian/copyright \
-	debian/netdata.conf \
-	debian/source/format \
-	debian/control.wheezy \
-	debian/compat \
-	debian/netdata.install \
-	debian/netdata.lintian-overrides \
-	debian/rules \
-	debian/netdata.docs \
-	debian/netdata.default \
-	debian/control \
-
debian/netdata.postinst.in \ - debian/netdata.service \ - debian/changelog \ - debian/netdata.postrm \ - rhel/build-netdata-rpm.sh \ - $(NULL) - -dist_noinst_SCRIPTS = \ - debian/netdata.init \ - $(NULL) - -debian/changelog: - echo "netdata ($(PACKAGE_VERSION)) UNRELEASED; urgency=medium" | \ - tr '_' '~' > $@ - echo " * Latest release" >> $@ - echo " -- Netdata Team <> `date -R`" >> $@ diff --git a/contrib/Makefile.in b/contrib/Makefile.in deleted file mode 100644 index 789a7bfd9..000000000 --- a/contrib/Makefile.in +++ /dev/null @@ -1,491 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = contrib -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - 
$(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = 
@VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - debian/copyright \ - debian/netdata.conf \ - debian/source/format \ - debian/control.wheezy \ - debian/compat \ - debian/netdata.install \ - debian/netdata.lintian-overrides \ - debian/rules \ - debian/netdata.docs \ - debian/netdata.default \ - debian/control \ - debian/netdata.postinst.in \ - debian/netdata.service \ - debian/changelog \ - debian/netdata.postrm \ - rhel/build-netdata-rpm.sh \ - $(NULL) - -dist_noinst_SCRIPTS = \ - debian/netdata.init \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contrib/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu contrib/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -debian/changelog: - echo "netdata ($(PACKAGE_VERSION)) UNRELEASED; urgency=medium" | \ - tr '_' '~' > $@ - echo " * Latest release" >> $@ - echo " -- Netdata Team <> `date -R`" >> $@ - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/contrib/README.md b/contrib/README.md index 60bcf3f28..c5ce873a7 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -56,3 +56,5 @@ The recommended way to upgrade netdata packages built from this source is to remove the current package from your system, then install the new package. Upgrading on wheezy is known to not work cleanly; Jessie may behave as expected. 
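(Illustrative only, not part of the patch: with a Debian-style build from this
contrib tree, the remove-then-install upgrade described above would look like
the following; exact package file names vary per build.)

    # remove the currently installed package, then install the new build
    sudo dpkg -r netdata
    sudo dpkg -i ../netdata_<version>_<arch>.deb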
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/contrib/debian/changelog b/contrib/debian/changelog deleted file mode 100644 index ee5e82ebd..000000000 --- a/contrib/debian/changelog +++ /dev/null @@ -1,3 +0,0 @@ -netdata (1.11.1~rolling) UNRELEASED; urgency=medium - * Latest release - -- Netdata Team <> Thu, 22 Nov 2018 20:33:40 +0000 diff --git a/contrib/rhel/build-netdata-rpm.sh b/contrib/rhel/build-netdata-rpm.sh index 927318fb1..df33d8068 100755 --- a/contrib/rhel/build-netdata-rpm.sh +++ b/contrib/rhel/build-netdata-rpm.sh @@ -5,7 +5,7 @@ cd "$(dirname "$0")/../../" || exit 1 # shellcheck disable=SC1091 -source "installer/functions.sh" || exit 1 +source "packaging/installer/functions.sh" || exit 1 set -e diff --git a/contrib/sles11/README.md b/contrib/sles11/README.md new file mode 100644 index 000000000..d052b9454 --- /dev/null +++ b/contrib/sles11/README.md @@ -0,0 +1,11 @@ +# spec to build netdata RPM for sles 11 + +Based on [opensuse rpm spec](https://build.opensuse.org/package/show/network/netdata) with some +changes and additions for sles 11 backport, namely: +- init.d script +- run-time dependency on python ordereddict backport +- patch for netdata python.d plugin to work with older python +- crude hack of notification script to work with bash 3 (email and syslog only, one destination, + see comments at the top) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcontrib%2Fsles11%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/contrib/sles11/alarm-notify-basic.bash3.sh b/contrib/sles11/alarm-notify-basic.bash3.sh new file mode 100755 index 000000000..df382923e --- /dev/null +++ b/contrib/sles11/alarm-notify-basic.bash3.sh @@ -0,0 +1,755 @@ +#!/usr/bin/env bash + +# basic version of netdata notifier to work with bash3 +# only mail and syslog destinations are supported, one recipient each +# - email: DEFAULT_RECIPIENT_EMAIL, "root" by default +# - syslog: "netdata" with local6 facility; disabled by default +# - also: setting recipient to "disabled" or "silent" stops notifications for this alert + +# in /etc/netdata/health_alarm_notify.conf set something like +# EMAIL_SENDER="netdata@gesdev-vm.m0.maxidom.ru" +# SEND_EMAIL="YES" +# DEFAULT_RECIPIENT_EMAIL="root" +# SEND_SYSLOG="YES" +# SYSLOG_FACILITY="local6" +# DEFAULT_RECIPIENT_SYSLOG="netdata" + +# netdata +# real-time performance and health monitoring, done right! 
+# (C) 2017 Costa Tsaousis
+# GPL v3+
+#
+# Script to send alarm notifications for netdata
+#
+# Supported notification methods:
+#  - emails by @ktsaou
+#  - syslog messages by @Ferroin
+#  - all the rest is pruned :)
+
+# -----------------------------------------------------------------------------
+# testing notifications
+
+if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ]
+then
+    if [ "${2}" = "test" ]
+    then
+        recipient="${1}"
+    else
+        recipient="${2}"
+    fi
+
+    [ -z "${recipient}" ] && recipient="sysadmin"
+
+    id=1
+    last="CLEAR"
+    test_res=0
+    for x in "WARNING" "CRITICAL" "CLEAR"
+    do
+        echo >&2
+        echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}"
+
+        "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value"
+        if [ $? -ne 0 ]
+        then
+            echo >&2 "# FAILED"
+            test_res=1
+        else
+            echo >&2 "# OK"
+        fi
+
+        last="${x}"
+        id=$((id + 1))
+    done
+
+    exit $test_res
+fi
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
+export LC_ALL=C
+
+# -----------------------------------------------------------------------------
+
+PROGRAM_NAME="$(basename "${0}")"
+
+logdate() {
+    date "+%Y-%m-%d %H:%M:%S"
+}
+
+log() {
+    local status="${1}"
+    shift
+
+    echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
+
+}
+
+warning() {
+    log WARNING "${@}"
+}
+
+error() {
+    log ERROR "${@}"
+}
+
+info() {
+    log INFO "${@}"
+}
+
+fatal() {
+    log FATAL "${@}"
+    exit 1
+}
+
+debug=${NETDATA_ALARM_NOTIFY_DEBUG-0}
+debug() {
+    [ "${debug}" = "1" ] && log DEBUG "${@}"
+}
+
+docurl() {
+    if [ -z "${curl}" ]
+    then
+        error "\${curl} is unset."
+        return 1
+    fi
+
+    if [ "${debug}" = "1" ]
+    then
+        echo >&2 "--- BEGIN curl command ---"
+        printf >&2 "%q " ${curl} "${@}"
+        echo >&2
+        echo >&2 "--- END curl command ---"
+
+        local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX)
+        local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}")
+        local ret=$?
+        echo >&2 "--- BEGIN received response ---"
+        cat >&2 "${out}"
+        echo >&2
+        echo >&2 "--- END received response ---"
+        echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}"
+        rm "${out}"
+        echo "${code}"
+        return ${ret}
+    fi
+
+    ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}"
+    return $?
+}
+
+# -----------------------------------------------------------------------------
+# this is to be overwritten by the config file
+
+custom_sender() {
+    info "not sending custom notification for ${status} of '${host}.${chart}.${name}'"
+}
+
+
+# -----------------------------------------------------------------------------
+
+# check for BASH v3+ (this bash v3 backport avoids the associative arrays
+# that would otherwise require BASH v4+)
+[ $(( ${BASH_VERSINFO[0]} )) -lt 3 ] && \
+    fatal "BASH version 3 or later is required (this is ${BASH_VERSION})."
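# Illustration, not part of the patch: the test mode above can be exercised by
# hand. Assuming the script is invoked as alarm-notify-basic.bash3.sh and the
# role "sysadmin" should receive the test, this sends one WARNING, one CRITICAL
# and one CLEAR test alarm, with the debug tracing implemented above enabled:
#
#   NETDATA_ALARM_NOTIFY_DEBUG=1 ./alarm-notify-basic.bash3.sh sysadmin test
#
# A zero exit status means every enabled notification method accepted the test
# alarms; any failure is reported as "# FAILED" on stderr.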
+
+# -----------------------------------------------------------------------------
+# defaults to allow running this script by hand
+
+[ -z "${NETDATA_CONFIG_DIR}" ] && NETDATA_CONFIG_DIR="$(dirname "${0}")/../../../../etc/netdata"
+[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="$(dirname "${0}")/../../../../var/cache/netdata"
+[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io"
+
+# -----------------------------------------------------------------------------
+# parse command line parameters
+
+roles="${1}"               # the roles that should be notified for this event
+host="${2}"                # the host that generated this event
+unique_id="${3}"           # the unique id of this event
+alarm_id="${4}"            # the unique id of the alarm that generated this event
+event_id="${5}"            # the incremental id of the event, for this alarm id
+when="${6}"                # the timestamp at which this event occurred
+name="${7}"                # the name of the alarm, as given in netdata health.d entries
+chart="${8}"               # the name of the chart (type.id)
+family="${9}"              # the family of the chart
+status="${10}"             # the current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+old_status="${11}"         # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL
+value="${12}"              # the current value of the alarm
+old_value="${13}"          # the previous value of the alarm
+src="${14}"                # the line number and file in which the alarm has been configured
+duration="${15}"           # the duration in seconds of the previous alarm state
+non_clear_duration="${16}" # the total duration in seconds this is/was non-clear
+units="${17}"              # the units of the value
+info="${18}"               # a short description of the alarm
+value_string="${19}"       # friendly value (with units)
+old_value_string="${20}"   # friendly old value (with units)
+
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+this_host=$(hostname -s 2>/dev/null)
+[ -z "${host}" ] && host="${this_host}"
+
+# -----------------------------------------------------------------------------
+# screen out the statuses for which no notification should be sent
+
+# don't do anything if this is not WARNING, CRITICAL or CLEAR
+if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
+then
+    info "not sending notification for ${status} of '${host}.${chart}.${name}'"
+    exit 1
+fi
+
+# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
+if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
+then
+    info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
+    exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# load configuration
+
+# By default fetch images from the global public registry.
+# This is required by default, since all notification methods need to download
+# images via the Internet, and private registries might not be reachable.
+# This can be overridden in the configuration file.
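# Illustration, not part of the patch: images_base_url below is one of the
# variables that /etc/netdata/health_alarm_notify.conf (sourced further down)
# may override, e.g. to point at a private registry (hypothetical URL):
#
#   # /etc/netdata/health_alarm_notify.conf
#   images_base_url="http://registry.example.lan:19999"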
+images_base_url="https://registry.my-netdata.io"
+
+# curl options to use
+curl_options=
+
+# needed commands
+# if empty they will be searched in the system path
+curl=
+sendmail=
+
+# enable / disable features
+SEND_EMAIL="YES"
+SEND_SYSLOG="YES"
+
+# syslog configs
+SYSLOG_FACILITY="local6"
+
+# email configs
+EMAIL_SENDER=
+DEFAULT_RECIPIENT_EMAIL="root"
+EMAIL_CHARSET=$(locale charmap 2>/dev/null)
+
+# load the user configuration
+# this will overwrite the variables above
+if [ -f "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf" ]
+    then
+    source "${NETDATA_CONFIG_DIR}/health_alarm_notify.conf"
+else
+    error "Cannot find file ${NETDATA_CONFIG_DIR}/health_alarm_notify.conf. Using internal defaults."
+fi
+
+# If we didn't autodetect the character set for e-mail and it wasn't
+# set by the user, we need to set it to a reasonable default. UTF-8
+# should be correct for almost all modern UNIX systems.
+if [ -z "${EMAIL_CHARSET}" ]
+    then
+    EMAIL_CHARSET="UTF-8"
+fi
+
+# disable notifications if the role is 'silent' or 'disabled'
+if [[ "${roles}" = "silent" || "${roles}" = "disabled" ]]; then
+    SEND_EMAIL="NO"
+    SEND_SYSLOG="NO"
+fi
+
+if [[ "${SEND_EMAIL}" != "NO" ]]; then
+    to_email=$DEFAULT_RECIPIENT_EMAIL
+else
+    to_email=''
+fi
+
+if [[ "${SEND_SYSLOG}" != "NO" ]]; then
+    to_syslog=$DEFAULT_RECIPIENT_SYSLOG
+else
+    to_syslog=''
+fi
+
+# if we need sendmail, check for the sendmail command
+if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ]
+    then
+    sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)"
+    if [ -z "${sendmail}" ]
+    then
+        debug "Cannot find sendmail command in the system path. Disabling email notifications."
+        SEND_EMAIL="NO"
+    fi
+fi
+
+# if we need logger, check for the logger command
+if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ]
+    then
+    logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)"
+    if [ -z "${logger}" ]
+    then
+        debug "Cannot find logger command in the system path. Disabling syslog notifications."
+        SEND_SYSLOG="NO"
+    fi
+fi
+
+# check that we have at least a method enabled
+if [ "${SEND_EMAIL}" != "YES" \
+    -a "${SEND_SYSLOG}" != "YES" \
+    ]
+    then
+    fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
+fi
+
+# -----------------------------------------------------------------------------
+# get the date the alarm happened
+
+date=$(date --date=@${when} "${DATE_FORMAT}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date "${DATE_FORMAT}" 2>/dev/null)
+[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null)
+[ -z "${date}" ] && date=$(date 2>/dev/null)
+
+# -----------------------------------------------------------------------------
+# function to URL encode a string
+
+urlencode() {
+    local string="${1}" strlen encoded pos c o
+
+    strlen=${#string}
+    for (( pos=0 ; pos<strlen ; pos++ ))
+    do
+        c="${string:${pos}:1}"
+        case "${c}" in
+            [-_.~a-zA-Z0-9])
+                o="${c}"
+                ;;
+
+            *)
+                printf -v o '%%%02x' "'${c}"
+                ;;
+        esac
+        encoded+="${o}"
+    done
+
+    REPLY="${encoded}"
+    echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# function to convert a duration in seconds to a human readable duration
+
+duration4human() {
+    local s="${1}" d=0 h=0 m=0 ds="day" hs="hour" ms="minute" ss="second" ret
+    d=$(( s / 86400 ))
+    s=$(( s - (d * 86400) ))
+    h=$(( s / 3600 ))
+    s=$(( s - (h * 3600) ))
+    m=$(( s / 60 ))
+    s=$(( s - (m * 60) ))
+
+    if [ ${d} -gt 0 ]
+    then
+        [ ${m} -ge 30 ] && h=$(( h + 1 ))
+        [ ${d} -gt 1 ] && ds="days"
+        [ ${h} -gt 1 ] && hs="hours"
+        if [ ${h} -gt 0 ]
+        then
+            ret="${d} ${ds} and ${h} ${hs}"
+        else
+            ret="${d} ${ds}"
+        fi
+    elif [ ${h} -gt 0 ]
+    then
+        [ ${s} -ge 30 ] && m=$(( m + 1 ))
+        [ ${h} -gt 1 ] && hs="hours"
+        [ ${m} -gt 1 ] && ms="minutes"
+        if [ ${m} -gt 0 ]
+        then
+            ret="${h} ${hs} and ${m} ${ms}"
+        else
+            ret="${h} ${hs}"
+        fi
+    elif [ ${m} -gt 0 ]
+    then
+        [ ${m} -gt 1 ] && ms="minutes"
+        ret="${m} ${ms}"
+    else
+        [ ${s} -gt 1 ] && ss="seconds"
+        ret="${s} ${ss}"
+    fi
+
+    REPLY="${ret}"
+    echo "${REPLY}"
+}
+
+# -----------------------------------------------------------------------------
+# email sender
+
+send_email() {
+    local ret opts=
+
+    if [ "${SEND_EMAIL}" = "YES" ]
+    then
+        if [ ! -z "${EMAIL_SENDER}" ]
+        then
+            if [[ "${EMAIL_SENDER}" =~ \'.*\'\ \<.*\> ]]
+            then
+                # the name includes single quotes
+                opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+            elif [[ "${EMAIL_SENDER}" =~ \".*\"\ \<.*\> ]]
+            then
+                # the name includes double quotes
+                opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F $(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)"
+            elif [[ "${EMAIL_SENDER}" =~ .*\ \<.*\> ]]
+            then
+                # the name does not have any quotes
+                opts=" -f $(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1) -F '$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)'"
+            else
+                # no name at all
+                opts=" -f ${EMAIL_SENDER}"
+            fi
+        fi
+
+        if [[ "${debug}" = "1" ]]
+        then
+            echo >&2 "--- BEGIN sendmail command ---"
+            printf >&2 "%q " "${sendmail}" -t ${opts}
+            echo >&2
+            echo >&2 "--- END sendmail command ---"
+        fi
+
+        "${sendmail}" -t ${opts}
+        ret=$?
+
+        if [ ${ret} -eq 0 ]
+        then
+            info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'"
+            return 0
+        else
+            error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}."
+            return 1
+        fi
+    fi
+
+    return 1
+}
+
+# -----------------------------------------------------------------------------
+# syslog sender
+
+send_syslog() {
+    local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}"
+    local priority='' message='' host='' port='' prefix='' logger_options=''
+    local temp1='' temp2=''
+
+    [[ "${SEND_SYSLOG}" == "YES" ]] || return 1
+
+    if [[ "${status}" == "CRITICAL" ]] ; then
+        level='crit'
+    elif [[ "${status}" == "WARNING" ]] ; then
+        level='warning'
+    fi
+
+    for target in ${targets} ; do
+        priority="${facility}.${level}"
+        message=''
+        host=''
+        port=''
+        prefix=''
+        temp1=''
+        temp2=''
+        logger_options=''
+
+        prefix=$(echo ${target} | cut -d '/' -f 2)
+        temp1=$(echo ${target} | cut -d '/' -f 1)
+
+        if [[ "${prefix}" != "${temp1}" ]] ; then
+            if (echo ${temp1} | grep -q '@' ) ; then
+                temp2=$(echo ${temp1} | cut -d '@' -f 1)
+                host=$(echo ${temp1} | cut -d '@' -f 2)
+
+                if [ "${temp2}" != "${host}" ] ; then
+                    priority=${temp2}
+                fi
+
+                port=$(echo ${host} | rev | cut -d ':' -f 1 | rev)
+
+                if ( echo ${host} | grep -E -q '\[.*\]' ) ; then
+                    if ( echo ${port} | grep -q ']' ) ; then
+                        port=''
+                    else
+                        host=$(echo ${host} | rev | cut -d ':' -f 2- | rev)
+                    fi
+                else
+                    if [ "${port}" = "${host}" ] ; then
+                        port=''
+                    else
+                        host=$(echo ${host} | cut -d ':' -f 1)
+                    fi
+                fi
+            else
+                priority=${temp1}
+            fi
+        fi
+
+        # message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}"
+
+        message="${prefix} ${status}: ${chart} ${value_string}"
+
+        if [ -n "${host}" ] ; then
+            logger_options="${logger_options} -n ${host}"
+            if [ -n "${port}" ] ; then
+                logger_options="${logger_options} -P ${port}"
+            fi
+        fi
+
+        ${logger} -p ${priority} ${logger_options} "${message}"
+    done
+
+    return $?
+}
+
+
+# -----------------------------------------------------------------------------
+# prepare the content of the notification
+
+# the url to send the user to on click
+urlencode "${host}" >/dev/null; url_host="${REPLY}"
+urlencode "${chart}" >/dev/null; url_chart="${REPLY}"
+urlencode "${family}" >/dev/null; url_family="${REPLY}"
+urlencode "${name}" >/dev/null; url_name="${REPLY}"
+goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}"
+
+# the severity of the alarm
+severity="${status}"
+
+# the time the alarm was raised
+duration4human ${duration} >/dev/null; duration_txt="${REPLY}"
+duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}"
+raised_for="(was ${old_status} for ${duration_txt})"
+
+# the key status message
+status_message="status unknown"
+
+# the color of the alarm
+color="grey"
+
+# the alarm value
+alarm="${name//_/ } = ${value_string}"
+
+# the image of the alarm
+image="${images_base_url}/images/seo-performance-128.png"
+
+# prepare the title based on status
+case "${status}" in
+    CRITICAL)
+        image="${images_base_url}/images/alert-128-red.png"
+        status_message="is critical"
+        color="#ca414b"
+        ;;
+
+    WARNING)
+        image="${images_base_url}/images/alert-128-orange.png"
+        status_message="needs attention"
+        color="#ffc107"
+        ;;
+
+    CLEAR)
+        image="${images_base_url}/images/check-mark-2-128-green.png"
+        status_message="recovered"
+        color="#77ca6d"
+        ;;
+esac
+
+if [ "${status}" = "CLEAR" ]
+then
+    severity="Recovered from ${old_status}"
+    if [ ${non_clear_duration} -gt ${duration} ]
+    then
+        raised_for="(alarm was raised for ${non_clear_duration_txt})"
+    fi
+
+    # don't show the value when the status is CLEAR
+    # for certain alarms, this value might not have any meaning
+    alarm="${name//_/ } ${raised_for}"
+
+elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ]
+then
+    severity="Escalated to ${status}"
+    if [ ${non_clear_duration} -gt ${duration} ]
+    then
+        raised_for="(alarm is raised for ${non_clear_duration_txt})"
+    fi
+
+elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ]
+then
+    severity="Demoted to ${status}"
+    if [ ${non_clear_duration} -gt ${duration} ]
+    then
+        raised_for="(alarm is raised for ${non_clear_duration_txt})"
+    fi
+
+else
+    raised_for=
+fi
+
+# prepare HTML versions of elements
+info_html=
+[ ! -z "${info}" ] && info_html=" <small><br/>${info}</small>"
+
+raised_for_html=
+[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>"
+
+
+# -----------------------------------------------------------------------------
+# send the syslog message
+
+send_syslog ${to_syslog}
+
+SENT_SYSLOG=$?
+
+# -----------------------------------------------------------------------------
+# send the email
+
+send_email <<EOF
+To: ${to_email}
+Subject: ${host} ${status_message} - ${name//_/ } - ${chart}
+MIME-Version: 1.0
+Content-Type: multipart/alternative; boundary="multipart-boundary"
+
+This is a MIME-encoded multipart message
+
+--multipart-boundary
+Content-Type: text/html; charset=${EMAIL_CHARSET}
+Content-Disposition: inline
+
+<html>
+<body style="font-family: Arial, Helvetica, sans-serif; color: #303030;">
+<table align="center" width="640" cellpadding="6" cellspacing="0" style="border: 1px solid #e9e9e9; background: #f9f9f9;">
+<tr><td align="center" style="font-size: 12px; color: #777777;">
+netdata notification
+</td></tr>
+<tr><td align="center" bgcolor="${color}" style="font-size: 16px; color: #ffffff;">
+<img src="${image}" width="32" height="32"/><br/>
+<b>${host} ${status_message}</b>
+</td></tr>
+<tr><td>
+<table width="100%" cellpadding="6" cellspacing="0">
+<tr><td><b>${chart}</b></td><td align="right"><small>Chart</small></td></tr>
+<tr><td><b>${value_string}</b></td><td align="right"><small>Value</small></td></tr>
+<tr><td><b>${alarm}</b>${info_html}</td><td align="right"><small>Alarm</small></td></tr>
+<tr><td><b>${family}</b></td><td align="right"><small>Family</small></td></tr>
+<tr><td><b>${severity}</b></td><td align="right"><small>Severity</small></td></tr>
+<tr><td>${date}${raised_for_html}</td><td align="right"><small>Time</small></td></tr>
+<tr><td colspan="2" align="center"><a href="${goto_url}" style="color: ${color};"><b>View Netdata</b></a></td></tr>
+<tr><td colspan="2" style="font-size: 11px; color: #777777;">The source of this alarm is line <code>${src}</code><br/>(alarms are configurable, edit this file to adapt the alarm to your needs)</td></tr>
+<tr><td colspan="2" style="font-size: 11px; color: #777777;">Sent by
+<a href="https://my-netdata.io/" target="_blank">netdata</a>, the real-time performance and health monitoring, on <code>${this_host}</code>.
+</td></tr>
+</table>
+</td></tr>
+</table>
+</body>
+</html>
+ + +--multipart-boundary-- +EOF + +SENT_EMAIL=$? + +# ----------------------------------------------------------------------------- +# let netdata know + +if [ ${SENT_EMAIL} -eq 0 \ + -o ${SENT_SYSLOG} -eq 0 \ + ] + then + # we did send something + exit 0 +fi + +# we did not send anything +exit 1 diff --git a/contrib/sles11/netdata-alarms-bash3.patch b/contrib/sles11/netdata-alarms-bash3.patch new file mode 100644 index 000000000..142659ba9 --- /dev/null +++ b/contrib/sles11/netdata-alarms-bash3.patch @@ -0,0 +1,10 @@ +--- system/netdata.conf.orig 2018-05-10 19:44:49.000000000 +0300 ++++ system/netdata.conf 2018-05-10 19:45:14.000000000 +0300 +@@ -22,3 +22,7 @@ + [web] + web files owner = root + web files group = netdata ++ ++[health] ++ # script for sles 11, mail notifications only ++ script to execute on alarm = /usr/lib64/netdata/plugins.d/alarm-notify.bash3.sh diff --git a/contrib/sles11/netdata-automake-no-dist-xz.patch b/contrib/sles11/netdata-automake-no-dist-xz.patch new file mode 100644 index 000000000..d373efac2 --- /dev/null +++ b/contrib/sles11/netdata-automake-no-dist-xz.patch @@ -0,0 +1,13 @@ +diff -u netdata-1.6.0.orig/Makefile.am netdata-1.6.0/Makefile.am +--- netdata-1.6.0.orig/Makefile.am 2017-03-20 19:32:47.000000000 +0100 ++++ netdata-1.6.0/Makefile.am 2017-06-25 23:46:14.403426661 +0200 +@@ -1,7 +1,7 @@ + # + # Copyright (C) 2015 Alon Bar-Lev + # +-AUTOMAKE_OPTIONS=foreign dist-bzip2 dist-xz 1.10 ++AUTOMAKE_OPTIONS=foreign dist-bzip2 1.10 + ACLOCAL_AMFLAGS = -I m4 + + MAINTAINERCLEANFILES= \ + diff --git a/contrib/sles11/netdata-python-plugin-sles11.patch b/contrib/sles11/netdata-python-plugin-sles11.patch new file mode 100644 index 000000000..d2e8f6b69 --- /dev/null +++ b/contrib/sles11/netdata-python-plugin-sles11.patch @@ -0,0 +1,28 @@ +diff -u netdata-1.10.0.orig/plugins.d/python.d.plugin netdata-1.10.0/plugins.d/python.d.plugin +--- netdata-1.10.0.orig/plugins.d/python.d.plugin 2018-05-08 20:01:40.000000000 +0300 ++++ netdata-1.10.0/plugins.d/python.d.plugin 2018-05-08 20:06:57.000000000 +0300 +@@ -15,10 +15,8 @@ + from sys import version_info, argv + from time import sleep + +-try: +- from time import monotonic as time +-except ImportError: +- from time import time ++# from time import monotonic as time ++from time import time + + PY_VERSION = version_info[:2] + PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/' +@@ -32,10 +30,7 @@ + from bases.loggers import PythonDLogger + from bases.collection import setdefault_values, run_and_exit + +-try: +- from collections import OrderedDict +-except ImportError: +- from third_party.ordereddict import OrderedDict ++from ordereddict import OrderedDict + + BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1), + 'retries': 60, diff --git a/contrib/sles11/netdata.init b/contrib/sles11/netdata.init new file mode 100755 index 000000000..3081c427c --- /dev/null +++ b/contrib/sles11/netdata.init @@ -0,0 +1,65 @@ +#!/bin/bash +# +### BEGIN INIT INFO +# Provides: netdata +# Required-Start: $all +# Should-Start: +# Required-Stop: $all +# Should-Stop: +# Default-Start: 2 3 5 +# Default-Stop: +# Short-Description: Start and stop the netdata real-time monitoring server daemon +# Description: Controls the main netdata monitoring server daemon "netdata". +### END INIT INFO + +DAEMON="netdata" +DAEMON_BIN="/usr/sbin/${DAEMON}" +DAEMON_PID="/var/run/${DAEMON}.pid" +DAEMON_ARGS="" + +. /etc/rc.status +rc_reset + +if [ ! 
-x $DAEMON_BIN ]; then + echo -n >&2 "${DAEMON} binary is not installed. " + rc_status -s + exit 5 +fi + +case "$1" in + start) + echo -n "Starting $DAEMON" + /sbin/startproc $DAEMON_BIN $DAEMON_ARGS + rc_status -v + ;; + + stop) + echo -n "Stopping $DAEMON" + /sbin/killproc $DAEMON_BIN + rc_status -v + ;; + + reload) + # netdata: HUP reopen log files, USR1 save DB, USR2 reload health config + echo -n "Reloading $DAEMON config" + /sbin/killproc -USR2 $DAEMON_BIN + ;; + + restart) + $0 stop + $0 start + ;; + + status) + echo -n "Checking $DAEMON" + /sbin/checkproc $DAEMON_BIN + rc_status -v + ;; + + *) + echo "Usage: $0 {start|stop|status|reload|restart}" + exit 1 + ;; + +esac +rc_exit diff --git a/daemon/Makefile.am b/daemon/Makefile.am index bffc864dd..9611f223f 100644 --- a/daemon/Makefile.am +++ b/daemon/Makefile.am @@ -2,8 +2,19 @@ AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES= $(srcdir)/Makefile.in +CLEANFILES = \ + anonymous-statistics.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in dist_noinst_DATA = \ README.md \ config/README.md \ + anonymous-statistics.sh.in \ + $(NULL) + +dist_plugins_SCRIPTS = \ + anonymous-statistics.sh \ $(NULL) diff --git a/daemon/Makefile.in b/daemon/Makefile.in deleted file mode 100644 index 7111dfa1a..000000000 --- a/daemon/Makefile.in +++ /dev/null @@ -1,465 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = daemon -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = 
@CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ 
-psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - config/README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu daemon/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu daemon/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/daemon/README.md b/daemon/README.md index 305fc961d..858394c77 100644 --- a/daemon/README.md +++ b/daemon/README.md @@ -1,4 +1,4 @@ -# Running the Netdata Daemon +# Netdata daemon ## Starting netdata @@ -289,7 +289,7 @@ If you want to control it entirely via systemd, you can set in `netdata.conf`: Using the above, whatever OOM Score you have set at `netdata.service` will be maintained by netdata. 
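+
+For example (a sketch only: the drop-in file name below is illustrative, and the exact score is up to you; `OOMScoreAdjust` is the standard systemd directive), this arrangement pairs the two settings like so:
+
+```
+# netdata.conf
+[global]
+    OOM score = keep
+
+# /etc/systemd/system/netdata.service.d/oom.conf
+[Service]
+OOMScoreAdjust=-900
+```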
-## netdata process scheduling policy
+## Netdata process scheduling policy
 
 By default netdata runs with the `idle` process scheduling policy, so that it uses CPU resources, only when there is idle CPU to spare. On very busy servers (or weak servers), this can lead to gaps on the charts.
 
@@ -409,20 +409,17 @@ sudo systemctl daemon-reload
 sudo systemctl restart netdata
 ```
 
-## virtual memory
+## Virtual memory
 
-You may notice that netdata's virtual memory size, as reported by `ps` or `/proc/pid/status`
-(or even netdata's applications virtual memory chart) is unrealistically high.
+You may notice that netdata's virtual memory size, as reported by `ps` or `/proc/pid/status` (or even netdata's applications virtual memory chart) is unrealistically high.
 
-For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB.
-Similar values may be reported for netdata plugins too.
+For example, it may be reported to be 150+MB, even if the resident memory size is just 25MB. Similar values may be reported for netdata plugins too.
 
-Check this for example: A netdata installation with default settings on Ubuntu 16.04LTS.
-The top chart is **real memory used**, while the bottom one is **virtual memory**:
+Check this for example: A netdata installation with default settings on Ubuntu 16.04LTS. The top chart is **real memory used**, while the bottom one is **virtual memory**:
 
 ![image](https://cloud.githubusercontent.com/assets/2662304/19013772/5eb7173e-87e3-11e6-8f2b-a2ccfeb06faf.png)
 
-#### why this happens?
+**Why does this happen?**
 
 The system memory allocator allocates virtual memory arenas, per thread running. On Linux systems this defaults to 16MB per thread on 64 bit machines. So, if you get the
@@ -437,21 +434,16 @@ linux (that uses **musl** instead of **glibc**) is this:
 
 ![image](https://cloud.githubusercontent.com/assets/2662304/19013807/7cf5878e-87e4-11e6-9651-082e68701eab.png)
 
-#### can we do anything to lower it?
+**Can we do anything to lower it?**
 
-Since netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory
-on start, so that while repeatedly collects data it does not do memory allocations), it already
-instructs the system memory allocator to minimize the memory arenas for each thread. We have also
-added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
-to allow you tweak these settings.
+Since netdata already uses minimal memory allocations while it runs (i.e. it adapts its memory on start, so that while it repeatedly collects data it does not do memory allocations), it already instructs the system memory allocator to minimize the memory arenas for each thread. We have also added [2 configuration options](https://github.com/netdata/netdata/blob/5645b1ee35248d94e6931b64a8688f7f0d865ec6/src/main.c#L410-L418)
+to allow you to tweak these settings: `glibc malloc arena max for plugins` and `glibc malloc arena max for netdata`.
 
-However, even if we instructed the memory allocator to use just one arena, it seems it allocates
-an arena per thread.
+However, even if we instructed the memory allocator to use just one arena, it seems it allocates an arena per thread.
 
-netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same to the
-glibc memory allocator in this aspect.
+netdata also supports `jemalloc` and `tcmalloc`, however both behave exactly the same as the glibc memory allocator in this aspect.
 
-#### Is this a problem?
+**Is this a problem?**
 
 No, it is not.
@@ -524,3 +516,5 @@ valgrind $(which netdata) -D
 netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump on your console the stack trace. Open a new github issue and post the output.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in
new file mode 100755
index 000000000..f4375b101
--- /dev/null
+++ b/daemon/anonymous-statistics.sh.in
@@ -0,0 +1,197 @@
+#!/usr/bin/env sh
+
+# Valid actions:
+
+# - FATAL - netdata exited due to a fatal condition
+#    ACTION_RESULT -- program name and thread tag
+#    ACTION_DATA -- fmt, args passed to fatal
+# - START - netdata started
+#    ACTION_DATA -- nan
+# - EXIT - installation action
+#    ACTION_DATA -- ret value of
+
+ACTION="${1}"
+ACTION_RESULT="${2}"
+ACTION_DATA="${3}"
+ACTION_DATA=$(echo "${ACTION_DATA}" | tr '"' "'")
+
+# -------------------------------------------------------------------------------------------------
+# check opt-out
+
+if [ -f "@configdir_POST@/.opt-out-from-anonymous-statistics" ]; then
+  exit 0
+fi
+
+# -------------------------------------------------------------------------------------------------
+# detect the operating system
+
+OS_DETECTION="unknown"
+NAME="unknown"
+VERSION="unknown"
+VERSION_ID="unknown"
+ID="unknown"
+ID_LIKE="unknown"
+
+if [ -f "/etc/os-release" ]; then
+  OS_DETECTION="/etc/os-release"
+  eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" </etc/os-release)"
+fi
+
+if [ "${NAME}" = "unknown" ] || [ "${VERSION}" = "unknown" ] || [ "${ID}" = "unknown" ]; then
+  if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then
+    if [ "${OS_DETECTION}" = "unknown" ]; then OS_DETECTION="lsb_release"; else OS_DETECTION="Mixed"; fi
+    if [ "${NAME}" = "unknown" ]; then NAME="$(lsb_release -is 2>/dev/null)"; fi
+    if [ "${VERSION}" = "unknown" ]; then VERSION="$(lsb_release -rs 2>/dev/null)"; fi
+    if [ "${ID}" = "unknown" ]; then ID="$(lsb_release -cs 2>/dev/null)"; fi
+  fi
+fi
+
+# -------------------------------------------------------------------------------------------------
+# detect the kernel
+
+KERNEL_NAME="$(uname -s)"
+KERNEL_VERSION="$(uname -r)"
+ARCHITECTURE="$(uname -m)"
+
+# -------------------------------------------------------------------------------------------------
+# detect the virtualization
+
+VIRTUALIZATION="unknown"
+VIRT_DETECTION="none"
+CONTAINER="unknown"
+CONT_DETECTION="none"
+
+if [ -n "$(command -v systemd-detect-virt 2>/dev/null)" ]; then
+  VIRTUALIZATION="$(systemd-detect-virt -v)"
+  VIRT_DETECTION="systemd-detect-virt"
+  CONTAINER="$(systemd-detect-virt -c)"
+  CONT_DETECTION="systemd-detect-virt"
+else
+  if grep -q "^flags.*hypervisor" /proc/cpuinfo 2>/dev/null; then
+    VIRTUALIZATION="hypervisor"
+    VIRT_DETECTION="/proc/cpuinfo"
+  fi
+fi
+
+# -------------------------------------------------------------------------------------------------
+# detect containers with heuristics
+
+if [ "${CONTAINER}" = "unknown" ] ; then
+  IFS='(, ' read -r process _ </proc/1/sched 2>/dev/null
+  if [ "${process}" = "netdata" ]; then
+    CONTAINER="container"
+    CONT_DETECTION="/proc/1/sched"
+  fi
+
+  # ubuntu and debian supply /bin/running-in-container
+  if /bin/running-in-container >/dev/null 2>&1; then
+    CONTAINER="container"
+    CONT_DETECTION="/bin/running-in-container"
+  fi
+
+  # lxc sets environment variable 'container'
+  #shellcheck disable=SC2154
+  if [ -n "${container}" ]; then
+    CONTAINER="lxc"
+    CONT_DETECTION="containerenv"
+  fi
+
+  # docker creates /.dockerenv
+  # http://stackoverflow.com/a/25518345
+  if [ -f "/.dockerenv" ]; then
+    CONTAINER="docker"
+    CONT_DETECTION="dockerenv"
+  fi
+fi
+
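+# -------------------------------------------------------------------------------------------------
+# usage note: to disable this reporting entirely, create the opt-out file
+# checked at the top of this script. @configdir_POST@ is substituted at build
+# time; on a typical install (an assumption - your configure options may
+# differ) this means:
+#
+#   touch /etc/netdata/.opt-out-from-anonymous-statistics
+#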
+# -------------------------------------------------------------------------------------------------
+# check netdata version
+
+if [ -z "${NETDATA_VERSION}" ]; then
+  NETDATA_VERSION="unknown"
+  netdata -V >/dev/null 2>&1 && NETDATA_VERSION="$(netdata -V 2>&1 | cut -d ' ' -f 2)"
+fi
+
+# -------------------------------------------------------------------------------------------------
+# check netdata unique id
+if [ -z "${NETDATA_REGISTRY_UNIQUE_ID}" ] ; then
+  if [ -f "@registrydir_POST@/netdata.public.unique.id" ]; then
+    NETDATA_REGISTRY_UNIQUE_ID="$(cat "@registrydir_POST@/netdata.public.unique.id")"
+  else
+    NETDATA_REGISTRY_UNIQUE_ID="unknown"
+  fi
+fi
+
+
+# -------------------------------------------------------------------------------------------------
+# send the anonymous statistics to GA
+# https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
+if [ -n "$(command -v curl 2>/dev/null)" ]; then
+  curl -X POST -Ss --max-time 2 \
+    --data "v=1" \
+    --data "tid=UA-64295674-3" \
+    --data "aip=1" \
+    --data "ds=shell" \
+    --data-urlencode "cid=${NETDATA_REGISTRY_UNIQUE_ID}" \
+    --data-urlencode "cs=${NETDATA_REGISTRY_UNIQUE_ID}" \
+    --data "t=event" \
+    --data "ni=1" \
+    --data "an=anonymous-statistics" \
+    --data-urlencode "av=${NETDATA_VERSION}" \
+    --data-urlencode "ec=${ACTION}" \
+    --data-urlencode "ea=${ACTION_RESULT}" \
+    --data-urlencode "el=${ACTION_DATA}" \
+    --data-urlencode "cd1=${NAME}" \
+    --data-urlencode "cd2=${ID}" \
+    --data-urlencode "cd3=${ID_LIKE}" \
+    --data-urlencode "cd4=${VERSION}" \
+    --data-urlencode "cd5=${VERSION_ID}" \
+    --data-urlencode "cd6=${OS_DETECTION}" \
+    --data-urlencode "cd7=${KERNEL_NAME}" \
+    --data-urlencode "cd8=${KERNEL_VERSION}" \
+    --data-urlencode "cd9=${ARCHITECTURE}" \
+    --data-urlencode "cd10=${VIRTUALIZATION}" \
+    --data-urlencode "cd11=${VIRT_DETECTION}" \
+    --data-urlencode "cd12=${CONTAINER}" \
+    --data-urlencode "cd13=${CONT_DETECTION}" \
+    "https://www.google-analytics.com/collect" >/dev/null 2>&1
+else
+  wget -q -O - --timeout=1 "https://www.google-analytics.com/collect?\
+&v=1\
+&tid=UA-64295674-3\
+&aip=1\
+&ds=shell\
+&cid=${NETDATA_REGISTRY_UNIQUE_ID}\
+&cs=${NETDATA_REGISTRY_UNIQUE_ID}\
+&t=event\
+&ni=1\
+&an=anonymous-statistics\
+&av=${NETDATA_VERSION}\
+&ec=${ACTION}\
+&ea=${ACTION_RESULT}\
+&el=${ACTION_DATA}\
+&cd1=${NAME}\
+&cd2=${ID}\
+&cd3=${ID_LIKE}\
+&cd4=${VERSION}\
+&cd5=${VERSION_ID}\
+&cd6=${OS_DETECTION}\
+&cd7=${KERNEL_NAME}\
+&cd8=${KERNEL_VERSION}\
+&cd9=${ARCHITECTURE}\
+&cd10=${VIRTUALIZATION}\
+&cd11=${VIRT_DETECTION}\
+&cd12=${CONTAINER}\
+&cd13=${CONT_DETECTION}\
+" > /dev/null 2>&1
+fi
diff --git a/daemon/common.h b/daemon/common.h
index d912a30e7..d1172ad8d 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -78,5 +78,6 @@ extern char *netdata_configured_varlib_dir;
 extern char *netdata_configured_home_dir;
 extern char *netdata_configured_host_prefix;
 extern char *netdata_configured_timezone;
+extern int netdata_anonymous_statistics_enabled;
 
 #endif /* NETDATA_COMMON_H */
diff --git a/daemon/config/README.md b/daemon/config/README.md
old mode 100755
new mode 100644
index 5cd7844a2..64f8564cc
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -1,175 +1,146 @@
-# Configuration Guide
+# Daemon configuration
 
-Configuration files are placed in `/etc/netdata`.
-## Netdata Daemon
+
The daemon configuration file is read from `/etc/netdata/netdata.conf`. +Depending on your installation method, Netdata will have been installed either directly under `/`, or under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well. (i.e. `/etc/netdata` will be `/opt/netdata/etc/netdata`).
-The daemon configuration file is read from `/etc/netdata/netdata.conf`.
+This config file **is not needed by default**. Netdata works fine out of the box without it. But it does allow you to adapt the general behavior of Netdata, in great detail. You can find all these settings, with their default values, by accessing the URL `https://netdata.server.hostname:19999/netdata.conf`. For example, check the configuration file of [netdata.firehol.org](http://netdata.firehol.org/netdata.conf). HTTP access to this file is limited by default to private IPs, via the [web server access lists](../../web/server/#access-lists).
 
-In this file you can configure all aspects of netdata. Netdata provides configuration settings for plugins and charts found when started. You can find all these settings, with their default values, by accessing the URL `https://netdata.server.hostname:19999/netdata.conf`. For example check the configuration file of [netdata.firehol.org](http://netdata.firehol.org/netdata.conf).
+`netdata.conf` has sections declared with `[section]`. You will see the following sections:
 
-The configuration file has sections stated with `[section]`. There will be the following sections:
-
-1. `[global]` for global netdata daemon options
-2. `[plugins]` for controlling which plugins the netdata will use
-3. `[plugin:NAME]` one such section for each plugin enabled
-4. `[CHART_NAME]` once such section for each chart defined
+1. `[global]` to [configure](#global-section-options) the [netdata daemon](../).
+2. `[web]` to [configure the web server](../../web/server).
+3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
+4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health).
+5. `[registry]` for the [netdata registry](../../registry).
+6. `[backend]` to set up [streaming and replication](../../streaming) options.
+7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
+8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
+9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
 
 The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When you check the running configuration by accessing the URL `/netdata.conf` on your netdata server, netdata will add a comment on settings it does not currently use.
 
-### [global] section options
-
-
-setting | default | info
-:------:|:-------:|:----
-hostname|auto-detected|The hostname of the computer running netdata.
-history|3600|The number of entries the netdata daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#netdata-database) for more information.
-config directory|`/etc/netdata`|The directory configuration files are kept.
-plugins directory|`/usr/libexec/netdata/plugins.d`|The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.
-web files directory|`/usr/share/netdata/web`|The directory the web static files are kept.
-cache directory|`/var/cache/netdata`|The directory the memory database will be stored if and when netdata exits.
Netdata will re-read the database when it will start again, to continue from the same point. -log directory|`/var/log/netdata`|The directory in which the [log files](../#log-files) are kept. -host access prefix|*empty*|This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). -debug flags|0x00000000|Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging). -memory deduplication (ksm)|yes|When set to `yes`, netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [[Memory Deduplication - Kernel Same Page Merging - KSM]] -debug log|`/var/log/netdata/debug.log`|The filename to save debug information. This file will not be created is debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging). -error log|`/var/log/netdata/error.log`|The filename to save error messages for netdata daemon and all plugins (`stderr` is sent here for all netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log. -access log|`/var/log/netdata/access.log`|The filename to save the log of web clients accessing netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log. -memory mode|save|When set to `save` netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of netdata). When set to `ram` the round robin database will be temporary and it will be lost when netdata exits. -update every|1|The frequency in seconds, for data collection. For more information see [Performance](../../doc/Performance.md#netdata-performance). -run as user|`netdata`|The user netdata will run as. -web files owner|`netdata`|The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, netdata will only serve files owned by user given in `run as user`. -http port listen backlog|100|The port backlog. Check `man 2 listen`. -default port|19999|The default port to listen for web clients. -bind to|`*`|The IP address and port to listen to. This is a space separated list of IPv4 or IPv6 address and ports. The default will bind to all IP addresses. Example: `bind to = 127.0.0.1:19999 10.11.12.1:19998 [::1]:19999`. -disconnect idle web clients after seconds|60|The time in seconds to disconnect web clients after being totally idle. -enable web responses gzip compression|yes|When set to `yes`, netdata web responses will be GZIP compressed, if the web client accepts such responses. - -##### netdata process priority - -By default, netdata runs with the `idle` process scheduler, which assigns CPU resources to netdata, only when the system has such resources to spare. 
-
-The following `netdata.conf` settings control this:
-
-```
-[global]
- process scheduling policy = idle
- process scheduling priority = 0
- process nice level = 19
-```
-
-The policies supported by netdata are `idle` (the netdata default), `other` (also as `nice`), `batch`, `rr`, `fifo`. netdata also recognizes `keep` and `none` to keep the current settings without changing them.
-
-For `other`, `nice` and `batch`, the setting `process nice level = 19` is activated to configure the nice level of netdata. Nice gets values -20 (highest) to 19 (lowest).
-
-For `rr` and `fifo`, the setting `process scheduling priority = 0` is activated to configure the priority of the relative scheduling policy. Priority gets values 1 (lowest) to 99 (highest).
-
-For the details of each scheduler, see `man sched_setscheduler` and `man sched`.
+## Applying changes
 
-When netdata is running under systemd, it can only lower its priority (the default is `other` with `nice level = 0`). If you want to make netdata to get more CPU than that, you will need to set in `netdata.conf`:
+After `netdata.conf` has been modified, netdata needs to be restarted for changes to apply:
 
-```
-[global]
- process scheduling policy = keep
+```bash
+sudo service netdata restart
 ```
 
-and edit `/etc/systemd/system/netdata.service` and add:
+If the above does not work, try the following:
 
-```
-CPUSchedulingPolicy=other | batch | idle | fifo | rr
-CPUSchedulingPriority=99
-Nice=-10
+```bash
+sudo killall netdata; sleep 10; sudo netdata
 ```
 
+Please note that your data history will be lost if you have modified the `history` parameter in section `[global]`.
 
-### [plugins] section options
+## Sections
 
-In this section there will be a boolean (`yes`/`no`) option for each plugin. Additionally, there will be the following options:
+### [global] section options
 
 setting | default | info
 :------:|:-------:|:----
-checks|no|This is a debugging plugin for the internal latency of netdata.
-enable running new plugins|yes|When set to `yes`, netdata will enable plugins not configured specifically for them. Setting this to `no` will disable all plugins you have not set to `yes` explicitly.
-check for new plugins every|60|The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for netdata.
+process scheduling policy | `keep` | See [netdata process scheduling policy](../#netdata-process-scheduling-policy)
-
-## Netdata Plugins
+OOM score | `1000` | See [OOM score](../#oom-score)
-
-The configuration options for plugins appear in sections following the pattern `[plugin:NAME]`.
+glibc malloc arena max for plugins | `1` | See [Virtual memory](../#virtual-memory).
-
-### Internal Plugins
+glibc malloc arena max for netdata | `1` | See [Virtual memory](../#virtual-memory).
-
-Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information.
+hostname | auto-detected | The hostname of the computer running netdata.
+history | `3996` | The number of entries the netdata daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information.
+update every | `1` | The frequency in seconds, for data collection. For more information see [Performance](../../docs/Performance.md#performance).
+config directory | `/etc/netdata` | The directory configuration files are kept.
+stock config directory | `/usr/lib/netdata/conf.d` |
+log directory | `/var/log/netdata` | The directory in which the [log files](../#log-files) are kept.
+web files directory | `/usr/share/netdata/web` | The directory the web static files are kept.
+cache directory | `/var/cache/netdata` | The directory the memory database will be stored if and when netdata exits. Netdata will re-read the database when it starts again, to continue from the same point.
+lib directory | `/var/lib/netdata` | Contains the alarm log and the netdata instance guid.
+home directory | `/var/cache/netdata` | Contains the db files for the collected metrics.
+plugins directory | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.
+memory mode | `save` | When set to `save` netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of netdata). When set to `ram` the round robin database will be temporary and it will be lost when netdata exits. `none` disables the database at this host. This also disables health monitoring (there cannot be health monitoring without a database).
+host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this to work. Check [issue 43](https://github.com/netdata/netdata/issues/43).
+memory deduplication (ksm) | `yes` | When set to `yes`, netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm)
+TZ environment variable | `:/etc/localtime` | Where to find the timezone
+timezone | auto-detected | The timezone retrieved from the environment variable
+debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging).
+debug log | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging).
+error log | `/var/log/netdata/error.log` | The filename to save error messages for netdata daemon and all plugins (`stderr` is sent here for all netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.
+access log | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.
+errors flood protection period | `1200` | UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.
+errors to trigger flood protection | `200` | UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.
+run as user | `netdata` | The user netdata will run as.
+pthread stack size | auto-detected |
+cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers)
+gap when lost iterations above | `1` |
+cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data.
+delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers)
+delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal.
+
+### [web] section options
+
+Refer to the [web server documentation](../../web/server).
+### [plugins] section options
 
-### External Plugins
+In this section you will see a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). Note that the configuration options in this section for the orchestrator plugins `python.d`, `charts.d` and `node.d` control **all the modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules under `collectors/python.d.plugin` will be disabled.
 
-External plugins will have only 2 options at `netdata.conf`:
+Additionally, there will be the following options:
 
 setting | default | info
 :------:|:-------:|:----
-update every|the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../doc/Performance.md#netdata-performance).
-command options|*empty*|Additional command line options to pass to the plugin.
+PATH environment variable | `auto-detected` |
+PYTHONPATH environment variable | | Used to set a custom python path
+enable running new plugins | `yes` | When set to `yes`, netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes`
+check for new plugins every | 60 | The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for netdata.
+checks | `no` | This is a debugging plugin for the internal latency of netdata.
 
-External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.
+### [health] section options
 
----
+This section controls the general behavior of the health monitoring capabilities of Netdata.
 
-## A note about netdata.conf
+Specific alarms are configured in per-collector config files under the `health.d` directory.
For more info, see [health monitoring](../../health/#health-monitoring).
 
-This config file is not needed by default. You can just touch it (to be empty) to get rid of the error message displayed when missing.
+[Alarm notifications](../../health/notifications/#netdata-alarm-notifications) are configured in `health_alarm_notify.conf`.
 
-The whole idea came up when I was evaluating the documentation involved in maintaining a complex configuration system. My intention was to give configuration options for everything imaginable. But then, documenting all these options would require a tremendous amount of time, users would have to search through endless pages for the option they need, etc.
+setting | default | info
+:------:|:-------:|:----
+enabled | `yes` | Set to `no` to disable all alarms and notifications
+in memory max health log entries | 1000 | Size of the alarm history held in RAM
+script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications.
+stock health configuration directory | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alarm configuration files for each collector
+health configuration directory | `/etc/netdata/health.d` | The directory containing the user alarm configuration files, to override the stock configurations
+run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated.
+postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation.
+rotate log every lines | 2000 | Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [[global] section](#global-section-options)
 
-I concluded then that configuring software like that is a waste for time and effort. Of course there must be plenty of configuration options, but the implementation itself should require a lot less effort for both the devs and the users.
+### [registry] section options
 
-So, I did this:
+To understand what this section is and how it should be configured, please refer to the [registry documentation](../../registry).
 
-1. No configuration is required to run netdata
-2. There are plenty of options to tweak
-3. There is minimal documentation (or no at all)
+### [backend]
 
-### Why this works?
+Refer to the [streaming and replication](../../streaming) documentation.
 
-The configuration file is a `name = value` dictionary with `[sections]`. Write whatever you like there as long as it follows this simple format.
+### Per plugin configuration
 
-Netdata loads this dictionary and then when the code needs a value from it, it just looks up the `name` in the dictionary at the proper `section`. In all places, in the code, there are both the `names` and their `default values`, so if something is not found in the configuration file, the default is used. The lookup is made using B-Trees and hashes (no string comparisons), so they are super fast. Also the `names` of the settings can be `my super duper setting that once set to yes, will turn the world upside down = no` - so goodbye to most of the documentation involved.
+The configuration options for plugins appear in sections following the pattern `[plugin:NAME]`.
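+
+For example, a minimal such section could look like this (the section name and value below are hypothetical; the options available depend on the plugin):
+
+```
+[plugin:proc]
+    update every = 1
+```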
-## netdata simple patterns
+#### Internal plugins
-Unix prefers regular expressions. But they are just too hard, too cryptic to use, write and understand.
+Most internal plugins will provide additional options. Check [Internal Plugins](../../collectors/) for more information.
-So, netdata supports [simple patterns](../../libnetdata/simple_pattern/).
+#### External plugins
-## Applying changes
+External plugins will have only 2 options at `netdata.conf`:
-After `netdata.conf` has been modified, netdata needs to be restarted for changes to apply:
+setting | default | info
+:------:|:-------:|:----
+update every|the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).
+command options|*empty*|Additional command line options to pass to the plugin.
-```bash
-sudo service netdata restart
-```
+External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.
-If the above does not work, try the following:
+### Per chart configuration
-```bash
-sudo killall netdata; sleep 10; sudo netdata
-```
+In this section you will see a separate subsection for each chart shown on the dashboard. You can control all aspects of a specific chart here. You can understand what each option does by reading [how charts are defined](../../collectors/plugins.d/#chart). If you don't know how to find the name of a chart, you can learn about it [here](../../docs/Charts.md).
-Please note that your data history will be lost if you have modified the `history` parameter in section `[global]`.
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/daemon/daemon.h b/daemon/daemon.h
index 412691107..b65d2daa1 100644
--- a/daemon/daemon.h
+++ b/daemon/daemon.h
@@ -8,6 +8,7 @@
 extern int become_user(const char *username, int pid_fd);
 extern int become_daemon(int dont_fork, const char *user);
 extern void netdata_cleanup_and_exit(int i);
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
 
 extern char pidfile[];
 
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
index 68933e195..9933d0dfb 100644
--- a/daemon/global_statistics.c
+++ b/daemon/global_statistics.c
@@ -382,7 +382,7 @@ void global_statistics_charts(void) {
                 , "netdata"
                 , NULL
                 , "NetData API Response Time"
-                , "ms/request"
+                , "milliseconds/request"
                 , "netdata"
                 , "stats"
                 , 130400
diff --git a/daemon/main.c b/daemon/main.c
index b2c4c80bf..9e9bc55c2 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -2,6 +2,8 @@
 
 #include "common.h"
 
+int netdata_anonymous_statistics_enabled;
+
 struct config netdata_config = {
         .sections = NULL,
         .mutex = NETDATA_MUTEX_INITIALIZER,
@@ -22,6 +24,8 @@ void netdata_cleanup_and_exit(int ret) {
     error_log_limit_unlimited();
     info("EXIT: netdata prepares to exit with code %d...", ret);
 
+    send_statistics("EXIT", ret?"ERROR":"OK","-");
+
     // cleanup/save the database and exit
     info("EXIT: cleaning up the database...");
     rrdhost_cleanup_all();
@@ -67,8 +71,6 @@ struct netdata_static_thread static_threads[] = {
 
     // common plugins for all systems
     {"BACKENDS", NULL, NULL, 1, NULL, NULL, backends_main},
-    {"WEB_SERVER[multi]", NULL, NULL, 1, NULL, NULL, socket_listen_main_multi_threaded},
-    {"WEB_SERVER[single]", NULL, NULL, 0, NULL, NULL, socket_listen_main_single_threaded},
     {"WEB_SERVER[static1]", NULL, NULL, 0, NULL, NULL, socket_listen_main_static_threaded},
 
     {"STREAM", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread},
@@ -81,18 +83,10 @@ struct netdata_static_thread static_threads[] = {
 void web_server_threading_selection(void) {
     web_server_mode = web_server_mode_id(config_get(CONFIG_SECTION_WEB, "mode", web_server_mode_name(web_server_mode)));
 
-    int multi_threaded = (web_server_mode == WEB_SERVER_MODE_MULTI_THREADED);
-    int single_threaded = (web_server_mode == WEB_SERVER_MODE_SINGLE_THREADED);
     int static_threaded = (web_server_mode == WEB_SERVER_MODE_STATIC_THREADED);
 
     int i;
     for (i = 0; static_threads[i].name; i++) {
-        if (static_threads[i].start_routine == socket_listen_main_multi_threaded)
-            static_threads[i].enabled = multi_threaded;
-
-        if (static_threads[i].start_routine == socket_listen_main_single_threaded)
-            static_threads[i].enabled = single_threaded;
-
         if (static_threads[i].start_routine == socket_listen_main_static_threaded)
             static_threads[i].enabled = static_threaded;
     }
@@ -113,6 +107,8 @@ void web_server_config_options(void) {
     web_allow_registry_from = simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT);
     web_allow_streaming_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow streaming from", "*"), NULL, SIMPLE_PATTERN_EXACT);
     web_allow_netdataconf_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow netdata.conf from", "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.*"), NULL, SIMPLE_PATTERN_EXACT);
+    web_allow_mgmt_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow management from", "localhost"), NULL, SIMPLE_PATTERN_EXACT);
+
 #ifdef NETDATA_WITH_ZLIB
     web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip);
@@ -367,13 +363,6 @@ void log_init(void) {
 }
 
 static void backwards_compatible_config() {
-    // allow existing configurations to work with the current version of netdata
-
-    if(config_exists(CONFIG_SECTION_GLOBAL, "multi threaded web server")) {
-        int mode = config_get_boolean(CONFIG_SECTION_GLOBAL, "multi threaded web server", 1);
-        web_server_mode = (mode)?WEB_SERVER_MODE_MULTI_THREADED:WEB_SERVER_MODE_SINGLE_THREADED;
-    }
-
     // move [global] options to the [web] section
     config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog", CONFIG_SECTION_WEB, "listen backlog");
@@ -473,6 +462,7 @@ static void get_netdata_configured_variables() {
     netdata_configured_plugins_dir_base = strdupz(config_get(CONFIG_SECTION_GLOBAL, "plugins directory", plugins_dirs));
     quoted_strings_splitter(netdata_configured_plugins_dir_base, plugin_directories, PLUGINSD_MAX_DIRECTORIES, config_isspace);
     netdata_configured_plugins_dir = plugin_directories[0];
+
 }
 
 // ------------------------------------------------------------------------
@@ -596,6 +586,7 @@ void set_global_environment() {
         setenv("NETDATA_UPDATE_EVERY", b, 1);
     }
 
+    setenv("NETDATA_VERSION" , program_version, 1);
     setenv("NETDATA_HOSTNAME" , netdata_configured_hostname, 1);
     setenv("NETDATA_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1);
     setenv("NETDATA_USER_CONFIG_DIR" , verify_required_directory(netdata_configured_user_config_dir), 1);
@@ -658,6 +649,47 @@ static int load_netdata_conf(char *filename, char overwrite_used)
 {
     return ret;
 }
+
+void send_statistics( const char *action, const char *action_result, const char *action_data) {
+    static char *as_script;
+    if (netdata_anonymous_statistics_enabled == -1) {
+        char *optout_file = mallocz(sizeof(char) * (strlen(netdata_configured_user_config_dir) +strlen(".opt-out-from-anonymous-statistics") + 2));
+        sprintf(optout_file, "%s/%s", netdata_configured_user_config_dir, ".opt-out-from-anonymous-statistics");
+        if (likely(access(optout_file, R_OK) != 0)) {
+            as_script = mallocz(sizeof(char) * (strlen(netdata_configured_plugins_dir) + strlen("anonymous-statistics.sh") + 2));
+            sprintf(as_script, "%s/%s", netdata_configured_plugins_dir, "anonymous-statistics.sh");
+            if (unlikely(access(as_script, R_OK) != 0)) {
+                netdata_anonymous_statistics_enabled=0;
+                info("Anonymous statistics script %s not found.",as_script);
+                freez(as_script);
+            } else {
+                netdata_anonymous_statistics_enabled=1;
+            }
+        } else {
+            netdata_anonymous_statistics_enabled = 0;
+            as_script = NULL;
+        }
+        freez(optout_file);
+    }
+    if(!netdata_anonymous_statistics_enabled) return;
+    if (!action) return;
+    if (!action_result) action_result="";
+    if (!action_data) action_data="";
+    char *command_to_run=mallocz(sizeof(char) * (strlen(action) + strlen(action_result) + strlen(action_data) + strlen(as_script) + 10));
+    pid_t command_pid;
+
+    sprintf(command_to_run,"%s '%s' '%s' '%s'", as_script, action, action_result, action_data);
+    info("%s", command_to_run);
+
+    FILE *fp = mypopen(command_to_run, &command_pid);
+    if(fp) {
+        char buffer[100 + 1];
+        while (fgets(buffer, 100, fp) != NULL);
+        mypclose(fp, command_pid);
+    }
+    freez(command_to_run);
+}
+
 int main(int argc, char **argv) {
     int i;
     int config_loaded = 0;
@@ -876,7 +908,6 @@ int main(int argc, char **argv) {
             load_netdata_conf(NULL, 0);
     }
 
-    backwards_compatible_config();
     get_netdata_configured_variables();
 
     const char *section = argv[optind];
@@ -931,6 +962,9 @@ int main(int argc, char **argv) {
         get_netdata_configured_variables();
         set_global_environment();
 
+        netdata_anonymous_statistics_enabled=-1;
+        send_statistics("START","-", "-");
+
         // work while we are cd into config_dir
         // to allow the plugins refer to their config
         // files using relative filenames
@@ -1056,7 +1090,6 @@ int main(int argc, char **argv) {
 
     rrd_init(netdata_configured_hostname);
 
-
     // ------------------------------------------------------------------------
     // enable log flood protection
diff --git a/daemon/main.h b/daemon/main.h
index cb0bde6a9..687155981 100644
--- a/daemon/main.h
+++ b/daemon/main.h
@@ -43,5 +43,6 @@ struct netdata_static_thread {
 extern void cancel_main_threads(void);
 extern int killpid(pid_t pid, int signal);
 extern void netdata_cleanup_and_exit(int ret) NORETURN;
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
 
 #endif /* NETDATA_MAIN_H */
diff --git a/database/Makefile.in b/database/Makefile.in
deleted file mode 100644
index 4f5b710c5..000000000
--- a/database/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = database -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ 
- esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ 
-includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu database/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu database/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/database/README.md b/database/README.md index 68156f8a4..aedf4d520 100644 --- a/database/README.md +++ b/database/README.md @@ -1,4 +1,4 @@ -# Netdata database +# Database Although `netdata` does all its calculations using `long double`, it stores all values using a [custom-made 32-bit number](../libnetdata/storage_number/). 
@@ -204,3 +204,5 @@ Put the above lines in your boot sequence (`/etc/rc.local` or equivalent) to hav Netdata will create charts for kernel memory de-duplication performance, like this: ![image](https://cloud.githubusercontent.com/assets/2662304/11998786/eb23ae54-aab6-11e5-94d4-e848e8a5c56a.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdatabase%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/database/rrdcalc.h b/database/rrdcalc.h index 0c7cd0aa1..4df4381ae 100644 --- a/database/rrdcalc.h +++ b/database/rrdcalc.h @@ -25,6 +25,8 @@ #define RRDCALC_FLAG_WARN_ERROR 0x00000010 #define RRDCALC_FLAG_CRIT_ERROR 0x00000020 #define RRDCALC_FLAG_RUNNABLE 0x00000040 +#define RRDCALC_FLAG_DISABLED 0x00000080 +#define RRDCALC_FLAG_SILENCED 0x00000100 #define RRDCALC_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 struct rrdcalc { diff --git a/database/rrdhost.c b/database/rrdhost.c index 43aa2daa2..7234db9a0 100644 --- a/database/rrdhost.c +++ b/database/rrdhost.c @@ -103,7 +103,6 @@ static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_ host->hash_machine_guid = simple_hash(host->machine_guid); } - // ---------------------------------------------------------------------------- // RRDHOST - add a host @@ -149,6 +148,7 @@ RRDHOST *rrdhost_create(const char *hostname, rrdhost_init_hostname(host, hostname); rrdhost_init_machine_guid(host, guid); + rrdhost_init_os(host, os); rrdhost_init_timezone(host, timezone); rrdhost_init_tags(host, tags); @@ -442,7 +442,7 @@ restart_after_removal: void rrd_init(char *hostname) { rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time); gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above); - if(gap_when_lost_iterations_above < 1) + if (gap_when_lost_iterations_above < 1) gap_when_lost_iterations_above = 1; health_init(); @@ -471,6 +471,7 @@ void rrd_init(char *hostname) { , 1 ); rrd_unlock(); + web_client_api_v1_management_init(); } // ---------------------------------------------------------------------------- diff --git a/database/rrdsetvar.c b/database/rrdsetvar.c index 1bb883f0b..9da419304 100644 --- a/database/rrdsetvar.c +++ b/database/rrdsetvar.c @@ -150,12 +150,12 @@ RRDSETVAR *rrdsetvar_custom_chart_variable_create(RRDSET *st, const char *name) if(hash == rs->hash && strcmp(n, rs->variable) == 0) { rrdset_unlock(st); if(rs->options & RRDVAR_OPTION_CUSTOM_CHART_VAR) { - free(n); + freez(n); return rs; } else { error("RRDSETVAR: custom variable '%s' on chart '%s' of host '%s', conflicts with an internal chart variable", n, st->id, host->hostname); - free(n); + freez(n); return NULL; } } diff --git a/database/rrdvar.c b/database/rrdvar.c index 951a38cac..600bd34c4 100644 --- a/database/rrdvar.c +++ b/database/rrdvar.c @@ -137,7 +137,7 @@ static RRDVAR *rrdvar_custom_variable_create(const char *scope, avl_tree_lock *t RRDVAR *rv = rrdvar_create_and_index(scope, tree_lock, name, RRDVAR_TYPE_CALCULATED, RRDVAR_OPTION_CUSTOM_HOST_VAR|RRDVAR_OPTION_ALLOCATED, v); if(unlikely(!rv)) { - free(v); + freez(v); debug(D_VARIABLES, "Requested variable '%s' already exists - possibly 2 plugins are updating it at the same time.", name); char *variable = strdupz(name); diff --git a/depcomp b/depcomp deleted file 
mode 100755 index 4ebd5b3a2..000000000 --- a/depcomp +++ /dev/null @@ -1,791 +0,0 @@ -#! /bin/sh -# depcomp - compile a program generating dependencies as side-effects - -scriptversion=2013-05-30.07; # UTC - -# Copyright (C) 1999-2013 Free Software Foundation, Inc. - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# Originally written by Alexandre Oliva . - -case $1 in - '') - echo "$0: No command. Try '$0 --help' for more information." 1>&2 - exit 1; - ;; - -h | --h*) - cat <<\EOF -Usage: depcomp [--help] [--version] PROGRAM [ARGS] - -Run PROGRAMS ARGS to compile a file, generating dependencies -as side-effects. - -Environment variables: - depmode Dependency tracking mode. - source Source file read by 'PROGRAMS ARGS'. - object Object file output by 'PROGRAMS ARGS'. - DEPDIR directory where to store dependencies. - depfile Dependency file to output. - tmpdepfile Temporary file to use when outputting dependencies. - libtool Whether libtool is used (yes/no). - -Report bugs to . -EOF - exit $? - ;; - -v | --v*) - echo "depcomp $scriptversion" - exit $? - ;; -esac - -# Get the directory component of the given path, and save it in the -# global variables '$dir'. Note that this directory component will -# be either empty or ending with a '/' character. This is deliberate. -set_dir_from () -{ - case $1 in - */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; - *) dir=;; - esac -} - -# Get the suffix-stripped basename of the given path, and save it the -# global variable '$base'. -set_base_from () -{ - base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` -} - -# If no dependency file was actually created by the compiler invocation, -# we still have to create a dummy depfile, to avoid errors with the -# Makefile "include basename.Plo" scheme. -make_dummy_depfile () -{ - echo "#dummy" > "$depfile" -} - -# Factor out some common post-processing of the generated depfile. -# Requires the auxiliary global variable '$tmpdepfile' to be set. -aix_post_process_depfile () -{ - # If the compiler actually managed to produce a dependency file, - # post-process it. - if test -f "$tmpdepfile"; then - # Each line is of the form 'foo.o: dependency.h'. - # Do two passes, one to just change these to - # $object: dependency.h - # and one to simply output - # dependency.h: - # which is needed to avoid the deleted-header problem. - { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" - sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" - } > "$depfile" - rm -f "$tmpdepfile" - else - make_dummy_depfile - fi -} - -# A tabulation character. -tab=' ' -# A newline character. -nl=' -' -# Character ranges might be problematic outside the C locale. -# These definitions help. 
-upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ -lower=abcdefghijklmnopqrstuvwxyz -digits=0123456789 -alpha=${upper}${lower} - -if test -z "$depmode" || test -z "$source" || test -z "$object"; then - echo "depcomp: Variables source, object and depmode must be set" 1>&2 - exit 1 -fi - -# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. -depfile=${depfile-`echo "$object" | - sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} -tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} - -rm -f "$tmpdepfile" - -# Avoid interferences from the environment. -gccflag= dashmflag= - -# Some modes work just like other modes, but use different flags. We -# parameterize here, but still list the modes in the big case below, -# to make depend.m4 easier to write. Note that we *cannot* use a case -# here, because this file can only contain one case statement. -if test "$depmode" = hp; then - # HP compiler uses -M and no extra arg. - gccflag=-M - depmode=gcc -fi - -if test "$depmode" = dashXmstdout; then - # This is just like dashmstdout with a different argument. - dashmflag=-xM - depmode=dashmstdout -fi - -cygpath_u="cygpath -u -f -" -if test "$depmode" = msvcmsys; then - # This is just like msvisualcpp but w/o cygpath translation. - # Just convert the backslash-escaped backslashes to single forward - # slashes to satisfy depend.m4 - cygpath_u='sed s,\\\\,/,g' - depmode=msvisualcpp -fi - -if test "$depmode" = msvc7msys; then - # This is just like msvc7 but w/o cygpath translation. - # Just convert the backslash-escaped backslashes to single forward - # slashes to satisfy depend.m4 - cygpath_u='sed s,\\\\,/,g' - depmode=msvc7 -fi - -if test "$depmode" = xlc; then - # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. - gccflag=-qmakedep=gcc,-MF - depmode=gcc -fi - -case "$depmode" in -gcc3) -## gcc 3 implements dependency tracking that does exactly what -## we want. Yay! Note: for some reason libtool 1.4 doesn't like -## it if -MD -MP comes after the -MF stuff. Hmm. -## Unfortunately, FreeBSD c89 acceptance of flags depends upon -## the command line argument order; so add the flags where they -## appear in depend2.am. Note that the slowdown incurred here -## affects only configure: in makefiles, %FASTDEP% shortcuts this. - for arg - do - case $arg in - -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; - *) set fnord "$@" "$arg" ;; - esac - shift # fnord - shift # $arg - done - "$@" - stat=$? - if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - mv "$tmpdepfile" "$depfile" - ;; - -gcc) -## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. -## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. -## (see the conditional assignment to $gccflag above). -## There are various ways to get dependency output from gcc. Here's -## why we pick this rather obscure method: -## - Don't want to use -MD because we'd like the dependencies to end -## up in a subdir. Having to rename by hand is ugly. -## (We might end up doing this anyway to support other compilers.) -## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like -## -MM, not -M (despite what the docs say). Also, it might not be -## supported by the other compilers which use the 'gcc' depmode. -## - Using -M directly means running the compiler twice (even worse -## than renaming). - if test -z "$gccflag"; then - gccflag=-MD, - fi - "$@" -Wp,"$gccflag$tmpdepfile" - stat=$? 
- if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - rm -f "$depfile" - echo "$object : \\" > "$depfile" - # The second -e expression handles DOS-style file names with drive - # letters. - sed -e 's/^[^:]*: / /' \ - -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" -## This next piece of magic avoids the "deleted header file" problem. -## The problem is that when a header file which appears in a .P file -## is deleted, the dependency causes make to die (because there is -## typically no way to rebuild the header). We avoid this by adding -## dummy dependencies for each header file. Too bad gcc doesn't do -## this for us directly. -## Some versions of gcc put a space before the ':'. On the theory -## that the space means something, we add a space to the output as -## well. hp depmode also adds that space, but also prefixes the VPATH -## to the object. Take care to not repeat it in the output. -## Some versions of the HPUX 10.20 sed can't process this invocation -## correctly. Breaking it into two sed invocations is a workaround. - tr ' ' "$nl" < "$tmpdepfile" \ - | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ - | sed -e 's/$/ :/' >> "$depfile" - rm -f "$tmpdepfile" - ;; - -hp) - # This case exists only to let depend.m4 do its work. It works by - # looking at the text of this script. This case will never be run, - # since it is checked for above. - exit 1 - ;; - -sgi) - if test "$libtool" = yes; then - "$@" "-Wp,-MDupdate,$tmpdepfile" - else - "$@" -MDupdate "$tmpdepfile" - fi - stat=$? - if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - rm -f "$depfile" - - if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files - echo "$object : \\" > "$depfile" - # Clip off the initial element (the dependent). Don't try to be - # clever and replace this with sed code, as IRIX sed won't handle - # lines with more than a fixed number of characters (4096 in - # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; - # the IRIX cc adds comments like '#:fec' to the end of the - # dependency line. - tr ' ' "$nl" < "$tmpdepfile" \ - | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ - | tr "$nl" ' ' >> "$depfile" - echo >> "$depfile" - # The second pass generates a dummy entry for each header file. - tr ' ' "$nl" < "$tmpdepfile" \ - | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ - >> "$depfile" - else - make_dummy_depfile - fi - rm -f "$tmpdepfile" - ;; - -xlc) - # This case exists only to let depend.m4 do its work. It works by - # looking at the text of this script. This case will never be run, - # since it is checked for above. - exit 1 - ;; - -aix) - # The C for AIX Compiler uses -M and outputs the dependencies - # in a .u file. In older versions, this file always lives in the - # current directory. Also, the AIX compiler puts '$object:' at the - # start of each line; $object doesn't have directory information. - # Version 6 uses the directory in both cases. - set_dir_from "$object" - set_base_from "$object" - if test "$libtool" = yes; then - tmpdepfile1=$dir$base.u - tmpdepfile2=$base.u - tmpdepfile3=$dir.libs/$base.u - "$@" -Wc,-M - else - tmpdepfile1=$dir$base.u - tmpdepfile2=$dir$base.u - tmpdepfile3=$dir$base.u - "$@" -M - fi - stat=$? 
- if test $stat -ne 0; then - rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" - exit $stat - fi - - for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" - do - test -f "$tmpdepfile" && break - done - aix_post_process_depfile - ;; - -tcc) - # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 - # FIXME: That version still under development at the moment of writing. - # Make that this statement remains true also for stable, released - # versions. - # It will wrap lines (doesn't matter whether long or short) with a - # trailing '\', as in: - # - # foo.o : \ - # foo.c \ - # foo.h \ - # - # It will put a trailing '\' even on the last line, and will use leading - # spaces rather than leading tabs (at least since its commit 0394caf7 - # "Emit spaces for -MD"). - "$@" -MD -MF "$tmpdepfile" - stat=$? - if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - rm -f "$depfile" - # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. - # We have to change lines of the first kind to '$object: \'. - sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" - # And for each line of the second kind, we have to emit a 'dep.h:' - # dummy dependency, to avoid the deleted-header problem. - sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" - rm -f "$tmpdepfile" - ;; - -## The order of this option in the case statement is important, since the -## shell code in configure will try each of these formats in the order -## listed in this file. A plain '-MD' option would be understood by many -## compilers, so we must ensure this comes after the gcc and icc options. -pgcc) - # Portland's C compiler understands '-MD'. - # Will always output deps to 'file.d' where file is the root name of the - # source file under compilation, even if file resides in a subdirectory. - # The object file name does not affect the name of the '.d' file. - # pgcc 10.2 will output - # foo.o: sub/foo.c sub/foo.h - # and will wrap long lines using '\' : - # foo.o: sub/foo.c ... \ - # sub/foo.h ... \ - # ... - set_dir_from "$object" - # Use the source, not the object, to determine the base name, since - # that's sadly what pgcc will do too. - set_base_from "$source" - tmpdepfile=$base.d - - # For projects that build the same source file twice into different object - # files, the pgcc approach of using the *source* file root name can cause - # problems in parallel builds. Use a locking strategy to avoid stomping on - # the same $tmpdepfile. - lockdir=$base.d-lock - trap " - echo '$0: caught signal, cleaning up...' >&2 - rmdir '$lockdir' - exit 1 - " 1 2 13 15 - numtries=100 - i=$numtries - while test $i -gt 0; do - # mkdir is a portable test-and-set. - if mkdir "$lockdir" 2>/dev/null; then - # This process acquired the lock. - "$@" -MD - stat=$? - # Release the lock. - rmdir "$lockdir" - break - else - # If the lock is being held by a different process, wait - # until the winning process is done or we timeout. - while test -d "$lockdir" && test $i -gt 0; do - sleep 1 - i=`expr $i - 1` - done - fi - i=`expr $i - 1` - done - trap - 1 2 13 15 - if test $i -le 0; then - echo "$0: failed to acquire lock after $numtries attempts" >&2 - echo "$0: check lockdir '$lockdir'" >&2 - exit 1 - fi - - if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - rm -f "$depfile" - # Each line is of the form `foo.o: dependent.h', - # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. - # Do two passes, one to just change these to - # `$object: dependent.h' and one to simply `dependent.h:'. 
- sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" - # Some versions of the HPUX 10.20 sed can't process this invocation - # correctly. Breaking it into two sed invocations is a workaround. - sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ - | sed -e 's/$/ :/' >> "$depfile" - rm -f "$tmpdepfile" - ;; - -hp2) - # The "hp" stanza above does not work with aCC (C++) and HP's ia64 - # compilers, which have integrated preprocessors. The correct option - # to use with these is +Maked; it writes dependencies to a file named - # 'foo.d', which lands next to the object file, wherever that - # happens to be. - # Much of this is similar to the tru64 case; see comments there. - set_dir_from "$object" - set_base_from "$object" - if test "$libtool" = yes; then - tmpdepfile1=$dir$base.d - tmpdepfile2=$dir.libs/$base.d - "$@" -Wc,+Maked - else - tmpdepfile1=$dir$base.d - tmpdepfile2=$dir$base.d - "$@" +Maked - fi - stat=$? - if test $stat -ne 0; then - rm -f "$tmpdepfile1" "$tmpdepfile2" - exit $stat - fi - - for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" - do - test -f "$tmpdepfile" && break - done - if test -f "$tmpdepfile"; then - sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" - # Add 'dependent.h:' lines. - sed -ne '2,${ - s/^ *// - s/ \\*$// - s/$/:/ - p - }' "$tmpdepfile" >> "$depfile" - else - make_dummy_depfile - fi - rm -f "$tmpdepfile" "$tmpdepfile2" - ;; - -tru64) - # The Tru64 compiler uses -MD to generate dependencies as a side - # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. - # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put - # dependencies in 'foo.d' instead, so we check for that too. - # Subdirectories are respected. - set_dir_from "$object" - set_base_from "$object" - - if test "$libtool" = yes; then - # Libtool generates 2 separate objects for the 2 libraries. These - # two compilations output dependencies in $dir.libs/$base.o.d and - # in $dir$base.o.d. We have to check for both files, because - # one of the two compilations can be disabled. We should prefer - # $dir$base.o.d over $dir.libs/$base.o.d because the latter is - # automatically cleaned when .libs/ is deleted, while ignoring - # the former would cause a distcleancheck panic. - tmpdepfile1=$dir$base.o.d # libtool 1.5 - tmpdepfile2=$dir.libs/$base.o.d # Likewise. - tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 - "$@" -Wc,-MD - else - tmpdepfile1=$dir$base.d - tmpdepfile2=$dir$base.d - tmpdepfile3=$dir$base.d - "$@" -MD - fi - - stat=$? - if test $stat -ne 0; then - rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" - exit $stat - fi - - for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" - do - test -f "$tmpdepfile" && break - done - # Same post-processing that is required for AIX mode. - aix_post_process_depfile - ;; - -msvc7) - if test "$libtool" = yes; then - showIncludes=-Wc,-showIncludes - else - showIncludes=-showIncludes - fi - "$@" $showIncludes > "$tmpdepfile" - stat=$? - grep -v '^Note: including file: ' "$tmpdepfile" - if test $stat -ne 0; then - rm -f "$tmpdepfile" - exit $stat - fi - rm -f "$depfile" - echo "$object : \\" > "$depfile" - # The first sed program below extracts the file names and escapes - # backslashes for cygpath. The second sed program outputs the file - # name when reading, but also accumulates all include files in the - # hold buffer in order to output them again at the end. This only - # works with sed implementations that can handle large buffers. 
- sed < "$tmpdepfile" -n ' -/^Note: including file: *\(.*\)/ { - s//\1/ - s/\\/\\\\/g - p -}' | $cygpath_u | sort -u | sed -n ' -s/ /\\ /g -s/\(.*\)/'"$tab"'\1 \\/p -s/.\(.*\) \\/\1:/ -H -$ { - s/.*/'"$tab"'/ - G - p -}' >> "$depfile" - echo >> "$depfile" # make sure the fragment doesn't end with a backslash - rm -f "$tmpdepfile" - ;; - -msvc7msys) - # This case exists only to let depend.m4 do its work. It works by - # looking at the text of this script. This case will never be run, - # since it is checked for above. - exit 1 - ;; - -#nosideeffect) - # This comment above is used by automake to tell side-effect - # dependency tracking mechanisms from slower ones. - -dashmstdout) - # Important note: in order to support this mode, a compiler *must* - # always write the preprocessed file to stdout, regardless of -o. - "$@" || exit $? - - # Remove the call to Libtool. - if test "$libtool" = yes; then - while test "X$1" != 'X--mode=compile'; do - shift - done - shift - fi - - # Remove '-o $object'. - IFS=" " - for arg - do - case $arg in - -o) - shift - ;; - $object) - shift - ;; - *) - set fnord "$@" "$arg" - shift # fnord - shift # $arg - ;; - esac - done - - test -z "$dashmflag" && dashmflag=-M - # Require at least two characters before searching for ':' - # in the target name. This is to cope with DOS-style filenames: - # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. - "$@" $dashmflag | - sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" - rm -f "$depfile" - cat < "$tmpdepfile" > "$depfile" - # Some versions of the HPUX 10.20 sed can't process this sed invocation - # correctly. Breaking it into two sed invocations is a workaround. - tr ' ' "$nl" < "$tmpdepfile" \ - | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ - | sed -e 's/$/ :/' >> "$depfile" - rm -f "$tmpdepfile" - ;; - -dashXmstdout) - # This case only exists to satisfy depend.m4. It is never actually - # run, as this mode is specially recognized in the preamble. - exit 1 - ;; - -makedepend) - "$@" || exit $? - # Remove any Libtool call - if test "$libtool" = yes; then - while test "X$1" != 'X--mode=compile'; do - shift - done - shift - fi - # X makedepend - shift - cleared=no eat=no - for arg - do - case $cleared in - no) - set ""; shift - cleared=yes ;; - esac - if test $eat = yes; then - eat=no - continue - fi - case "$arg" in - -D*|-I*) - set fnord "$@" "$arg"; shift ;; - # Strip any option that makedepend may not understand. Remove - # the object too, otherwise makedepend will parse it as a source file. - -arch) - eat=yes ;; - -*|$object) - ;; - *) - set fnord "$@" "$arg"; shift ;; - esac - done - obj_suffix=`echo "$object" | sed 's/^.*\././'` - touch "$tmpdepfile" - ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" - rm -f "$depfile" - # makedepend may prepend the VPATH from the source file name to the object. - # No need to regex-escape $object, excess matching of '.' is harmless. - sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" - # Some versions of the HPUX 10.20 sed can't process the last invocation - # correctly. Breaking it into two sed invocations is a workaround. - sed '1,2d' "$tmpdepfile" \ - | tr ' ' "$nl" \ - | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ - | sed -e 's/$/ :/' >> "$depfile" - rm -f "$tmpdepfile" "$tmpdepfile".bak - ;; - -cpp) - # Important note: in order to support this mode, a compiler *must* - # always write the preprocessed file to stdout. - "$@" || exit $? - - # Remove the call to Libtool. 
- if test "$libtool" = yes; then - while test "X$1" != 'X--mode=compile'; do - shift - done - shift - fi - - # Remove '-o $object'. - IFS=" " - for arg - do - case $arg in - -o) - shift - ;; - $object) - shift - ;; - *) - set fnord "$@" "$arg" - shift # fnord - shift # $arg - ;; - esac - done - - "$@" -E \ - | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ - -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ - | sed '$ s: \\$::' > "$tmpdepfile" - rm -f "$depfile" - echo "$object : \\" > "$depfile" - cat < "$tmpdepfile" >> "$depfile" - sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" - rm -f "$tmpdepfile" - ;; - -msvisualcpp) - # Important note: in order to support this mode, a compiler *must* - # always write the preprocessed file to stdout. - "$@" || exit $? - - # Remove the call to Libtool. - if test "$libtool" = yes; then - while test "X$1" != 'X--mode=compile'; do - shift - done - shift - fi - - IFS=" " - for arg - do - case "$arg" in - -o) - shift - ;; - $object) - shift - ;; - "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") - set fnord "$@" - shift - shift - ;; - *) - set fnord "$@" "$arg" - shift - shift - ;; - esac - done - "$@" -E 2>/dev/null | - sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" - rm -f "$depfile" - echo "$object : \\" > "$depfile" - sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" - echo "$tab" >> "$depfile" - sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" - rm -f "$tmpdepfile" - ;; - -msvcmsys) - # This case exists only to let depend.m4 do its work. It works by - # looking at the text of this script. This case will never be run, - # since it is checked for above. - exit 1 - ;; - -none) - exec "$@" - ;; - -*) - echo "Unknown depmode $depmode" 1>&2 - exit 1 - ;; -esac - -exit 0 - -# Local Variables: -# mode: shell-script -# sh-indentation: 2 -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/diagrams/Makefile.in b/diagrams/Makefile.in deleted file mode 100644 index 5c7ed2ac9..000000000 --- a/diagrams/Makefile.in +++ /dev/null @@ -1,482 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = diagrams -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = 
@CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = 
@prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - config.puml \ - registry.puml \ - netdata-for-ephemeral-nodes.xml \ - netdata-proxies-example.xml \ - netdata-overview.xml \ - data_structures/netdata_config.svg \ - data_structures/README.md \ - data_structures/registry.svg \ - data_structures/rrd.svg \ - data_structures/web.svg \ - data_structures/src/netdata_config.xml \ - data_structures/src/registry.xml \ - data_structures/src/rrd.xml \ - data_structures/src/web.xml \ - $(NULL) - -dist_noinst_SCRIPTS = \ - build.sh \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu diagrams/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu diagrams/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/diagrams/data_structures/README.md b/diagrams/data_structures/README.md old mode 100755 new mode 100644 index d8d694835..bb56ca16a --- a/diagrams/data_structures/README.md +++ b/diagrams/data_structures/README.md @@ -9,3 +9,5 @@ These are the main internal data structures of `netdata`. 
Created with `draw.io` ![RRD](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/rrd.svg?sanitize=true) ![Web](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/web.svg?sanitize=true) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdiagrams%2Fdata_structures%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/doc/Add-more-charts-to-netdata.md b/doc/Add-more-charts-to-netdata.md deleted file mode 100644 index 1512a25e7..000000000 --- a/doc/Add-more-charts-to-netdata.md +++ /dev/null @@ -1,429 +0,0 @@ -# Add more charts to netdata - -netdata collects system metrics by itself. It has many [internal plugins](../collectors) for collecting most of the metrics presented by default when it starts, reading data from `/proc`, `/sys` and other Linux kernel sources. - -To collect non-system metrics, netdata supports a plugin architecture.
The following are the currently available external plugins: - -- **[Web Servers](#web-servers)**, such as apache, nginx, nginx_plus, tomcat, litespeed -- **[Web Logs](#web-log-parsers)**, such as apache, nginx, lighttpd, gunicorn, squid access logs, apache cache.log -- **[Load Balancers](#load-balancers)**, like haproxy -- **[Message Brokers](#message-brokers)**, like rabbitmq, beanstalkd -- **[Database Servers](#database-servers)**, such as mysql, mariadb, postgres, couchdb, mongodb, rethinkdb -- **[Social Sharing Servers](#social-sharing-servers)**, like retroshare -- **[Proxy Servers](#proxy-servers)**, like squid -- **[HTTP accelerators](#http-accelerators)**, like varnish cache -- **[Search engines](#search-engines)**, like elasticsearch -- **[Name Servers](#name-servers)** (DNS), like bind, nsd, powerdns, dnsdist -- **[DHCP Servers](#dhcp-servers)**, like ISC DHCP -- **[UPS](#ups)**, such as APC UPS, NUT -- **[RAID](#raid)**, such as linux software raid (mdadm), MegaRAID -- **[Mail Servers](#mail-servers)**, like postfix, exim, dovecot -- **[File Servers](#file-servers)**, like samba, NFS, ftp, sftp, WebDAV -- **[System](#system)**, for processes and other system metrics -- **[Sensors](#sensors)**, like temperature, fan speeds, voltage, humidity, HDD/SSD S.M.A.R.T attributes -- **[Network](#network)**, such as SNMP devices, `fping`, access points, dns_query_time -- **[Time Servers](#time-servers)**, like chrony -- **[Security](#security)**, like FreeRADIUS, OpenVPN, Fail2ban -- **[Telephony Servers](#telephony-servers)**, like openSIPS -- **[Go applications](#go-applications)** -- **[Household appliances](#household-appliances)**, like SMA WebBox (solar power), Fronius Symo solar power, Stiebel Eltron heating -- **[Java Processes](#java-processes)**, via JMX or Spring Boot Actuator -- **[Provisioning Systems](#provisioning-systems)**, like Puppet -- **[Game Servers](#game-servers)**, like SpigotMC -- **[Distributed Computing Clients](#distributed-computing-clients)**, like BOINC -- **[Skeleton Plugins](#skeleton-plugins)**, for writing your own data collectors - -Also check [Third Party Plugins](Third-Party-Plugins.md) for a list of plugins distributed by third parties. - -## configuring plugins - -netdata comes with **internal** and **external** plugins: - -1. The **internal** ones are written in `C` and run as threads within the netdata daemon. -2. The **external** ones can be written in any computer language. The netdata daemon spawns these as processes (shown with `ps fax`) and reads their metrics using pipes (so the `stdout` of external plugins is connected to netdata for metrics collection and the `stderr` of external plugins is connected to `/var/log/netdata/error.log`). - -To make it easier to develop plugins, and to minimize the number of threads and processes running, netdata supports **plugin orchestrators**, each of them supporting one or more data collection **modules**. Currently we ship plugin orchestrators for 4 languages: `C`, `python`, `node.js` and `bash`, with 2 more under development (`go` and `java`). - -#### enabling and disabling plugins - -To control which plugins netdata runs, edit `netdata.conf` and check the `[plugins]` section.
It looks like this: - -``` -[plugins] - # enable running new plugins = yes - # check for new plugins every = 60 - # proc = yes - # diskspace = yes - # cgroups = yes - # tc = yes - # nfacct = yes - # idlejitter = yes - # freeipmi = yes - # node.d = yes - # python.d = yes - # fping = yes - # charts.d = yes - # apps = yes -``` - -The default for all plugins is the option `enable running new plugins`. So, setting this to `no` will disable all the plugins, except the ones specifically enabled. - -#### enabling and disabling modules - -Each of the **plugins** may support one or more data collection **modules**. To control which of its modules run, you have to consult the configuration of the **plugin** (see the table below). - -#### modules configuration - -Most **modules** come with **auto-detection**, configured to work out-of-the-box on popular operating systems with the default settings. - -However, there are cases where auto-detection fails. Usually the reason is that the applications to be monitored do not allow netdata to connect. In most cases, allowing the user `netdata` from `localhost` to connect and collect metrics will automatically enable data collection for the application in question (a netdata restart will be required). - -You can verify that netdata **external plugins and their modules** are able to collect metrics by following this procedure: - -```sh -# become user netdata -sudo su -s /bin/bash netdata - -# execute the plugin in debug mode, for a specific module. -# example for the python plugin, mysql module: -/usr/libexec/netdata/plugins.d/python.d.plugin 1 debug trace mysql -``` - -Similarly, you can use `charts.d.plugin` for BASH plugins and `node.d.plugin` for node.js plugins. -Other plugins (like `apps.plugin`, `freeipmi.plugin`, `fping.plugin`) use the native netdata plugin API and can be run directly. - -If you need to configure a netdata plugin or module, all user-supplied configuration is kept in `/etc/netdata`, while the stock versions of all files are in `/usr/lib/netdata/conf.d`. -To copy a stock file and edit it, run `/etc/netdata/edit-config`. Running this command without an argument will list the available stock files. - -Each file should provide plenty of examples and documentation about each module and plugin. - -This is a map of all the supported configuration options: - -#### map of configuration files - -plugin | language | plugin
configuration | modules
configuration | ----:|:---:|:---:|:---| -`apps.plugin`
(external plugin for monitoring the process tree on Linux and FreeBSD)|`C`|`netdata.conf` section `[plugin:apps]`|Custom configuration for the processes to be monitored at `apps_groups.conf` -`freebsd.plugin`
(internal plugin for monitoring FreeBSD system resources)|`C`|`netdata.conf` section `[plugin:freebsd]`|one section for each module `[plugin:freebsd:MODULE]`. Each module may provide additional sections in the form of `[plugin:freebsd:MODULE:SUBSECTION]`. -`cgroups.plugin`
(internal plugin for monitoring Linux containers, VMs and systemd services)|`C`|`netdata.conf` section `[plugin:cgroups]`|N/A -`charts.d.plugin`
(external plugin orchestrator for BASH modules)|`BASH`|`charts.d.conf`|a file for each module in `/etc/netdata/charts.d/` -`diskspace.plugin`
(internal plugin for collecting Linux mount points usage)|`C`|`netdata.conf` section `[plugin:diskspace]`|N/A -`fping.plugin`
(external plugin for collecting network latencies)|`C`|`fping.conf`|This plugin is a wrapper for the `fping` command. -`freeipmi.plugin`
(external plugin for collecting IPMI h/w sensors)|`C`|`netdata.conf` section `[plugin:freeipmi]` -`idlejitter.plugin`
(internal plugin for monitoring CPU jitter)|`C`|N/A|N/A -`macos.plugin`
(internal plugin for monitoring MacOS system resources)|`C`|`netdata.conf` section `[plugin:macos]`|one section for each module `[plugin:macos:MODULE]`. Each module may provide additional sections in the form of `[plugin:macos:MODULE:SUBSECTION]`. -`node.d.plugin`
(external plugin orchestrator of node.js modules)|`node.js`|`node.d.conf`|a file for each module in `/etc/netdata/node.d/`. -`proc.plugin`
(internal plugin for monitoring Linux system resources)|`C`|`netdata.conf` section `[plugin:proc]`|one section for each module `[plugin:proc:MODULE]`. Each module may provide additional sections in the form of `[plugin:proc:MODULE:SUBSECTION]`. -`python.d.plugin`
(external plugin orchestrator for running python modules)|`python`
v2 or v3
both are supported|`python.d.conf`|a file for each module in `/etc/netdata/python.d/`. -`statsd.plugin`
(internal plugin for collecting statsd metrics)|`C`|`netdata.conf` section `[statsd]`|Synthetic statsd charts can be configured with files in `/etc/netdata/statsd.d/`. -`tc.plugin`
(internal plugin for collecting Linux traffic QoS)|`C`|`netdata.conf` section `[plugin:tc]`|The plugin runs an external helper called `tc-qos-helper.sh` to interface with the `tc` command. This helper supports a few additional options using `tc-qos-helper.conf`. - - -## writing data collection modules - -You can add custom plugins following the [External Plugins Guide](../collectors/plugins.d/). - ---- - -## available data collection modules - -These are all the data collection plugins currently available. - -### Web Servers - -application|language|notes| -:---------:|:------:|:----| -apache|python
v2 or v3|Connects to multiple apache servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [apache.chart.py](../collectors/python.d.plugin/apache)
configuration file: [python.d/apache.conf](../collectors/python.d.plugin/apache)| -apache|BASH
Shell Script|Connects to an apache server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [apache.chart.sh](../collectors/charts.d.plugin/apache)
configuration file: [charts.d/apache.conf](../collectors/charts.d.plugin/apache)| -ipfs|python
v2 or v3|Connects to multiple ipfs servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ipfs.chart.py](../collectors/python.d.plugin/ipfs)
configuration file: [python.d/ipfs.conf](../collectors/python.d.plugin/ipfs)| -litespeed|python
v2 or v3|Reads the litespeed `rtreport` files to collect metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [litespeed.chart.py](../collectors/python.d.plugin/litespeed)
configuration file: [python.d/litespeed.conf](../collectors/python.d.plugin/litespeed) -nginx|python
v2 or v3|Connects to multiple nginx servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nginx.chart.py](../collectors/python.d.plugin/nginx)
configuration file: [python.d/nginx.conf](../collectors/python.d.plugin/nginx)| -nginx_plus|python
v2 or v3|Connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nginx_plus.chart.py](../collectors/python.d.plugin/nginx_plus)
configuration file: [python.d/nginx_plus.conf](../collectors/python.d.plugin/nginx_plus)| -nginx|BASH
Shell Script|Connects to an nginx server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [nginx.chart.sh](../collectors/charts.d.plugin/nginx)
configuration file: [charts.d/nginx.conf](../collectors/charts.d.plugin/nginx)| -phpfpm|python
v2 or v3|Connects to multiple phpfpm servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [phpfpm.chart.py](../collectors/python.d.plugin/phpfpm)
configuration file: [python.d/phpfpm.conf](../collectors/python.d.plugin/phpfpm)| -phpfpm|BASH
Shell Script|Connects to one or more phpfpm servers (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [phpfpm.chart.sh](../collectors/charts.d.plugin/phpfpm)
configuration file: [charts.d/phpfpm.conf](../collectors/charts.d.plugin/phpfpm)| -tomcat|python
v2 or v3|Connects to multiple tomcat servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [tomcat.chart.py](../collectors/python.d.plugin/tomcat)
configuration file: [python.d/tomcat.conf](../collectors/python.d.plugin/tomcat)| -tomcat|BASH
Shell Script|Connects to a tomcat server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [tomcat.chart.sh](../collectors/charts.d.plugin/tomcat)
configuration file: [charts.d/tomcat.conf](../collectors/charts.d.plugin/tomcat)| - - ---- - -### Web Log Parsers - -application|language|notes| -:---------:|:------:|:----| -web_log|python
v2 or v3|A powerful plugin, capable of incrementally parsing any number of web server log files.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [web_log.chart.py](../collectors/python.d.plugin/web_log)
configuration file: [python.d/web_log.conf](../collectors/python.d.plugin/web_log)| - - ---- - -### Database Servers - -application|language|notes| -:---------:|:------:|:----| -couchdb|python
v2 or v3|Connects to multiple couchdb servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [couchdb.chart.py](../collectors/python.d.plugin/couchdb)
configuration file: [python.d/couchdb.conf](../collectors/python.d.plugin/couchdb)| -memcached|python
v2 or v3|Connects to multiple memcached servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [memcached.chart.py](../collectors/python.d.plugin/memcached)
configuration file: [python.d/memcached.conf](../collectors/python.d.plugin/memcached)| -mongodb|python
v2 or v3|Connects to multiple `mongodb` servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-pymongo`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mongodb.chart.py](../collectors/python.d.plugin/mongodb)
configuration file: [python.d/mongodb.conf](../collectors/python.d.plugin/mongodb)| -mysql
mariadb|python
v2 or v3|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-mysqldb` (faster and preferred), or `python-pymysql`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mysql.chart.py](../collectors/python.d.plugin/mysql)
configuration file: [python.d/mysql.conf](../collectors/python.d.plugin/mysql)| -mysql
mariadb|BASH
Shell Script|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [mysql.chart.sh](../collectors/charts.d.plugin/mysql)
configuration file: [charts.d/mysql.conf](../collectors/charts.d.plugin/mysql)| -postgres|python
v2 or v3|Connects to multiple postgres servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-psycopg2`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [postgres.chart.py](../collectors/python.d.plugin/postgres)
configuration file: [python.d/postgres.conf](../collectors/python.d.plugin/postgres)| -redis|python
v2 or v3|Connects to multiple redis servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [redis.chart.py](../collectors/python.d.plugin/redis)
configuration file: [python.d/redis.conf](../collectors/python.d.plugin/redis)| -rethinkdb|python
v2 or v3|Connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [rethinkdb.chart.py](../collectors/python.d.plugin/rethinkdbs)
configuration file: [python.d/rethinkdb.conf](../collectors/python.d.plugin/rethinkdbs)| - - ---- - -### Social Sharing Servers - -application|language|notes| -:---------:|:------:|:----| -retroshare|python
v2 or v3|Connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [retroshare.chart.py](../collectors/python.d.plugin/retroshare)
configuration file: [python.d/retroshare.conf](../collectors/python.d.plugin/retroshare)| - - ---- - -### Proxy Servers - -application|language|notes| -:---------:|:------:|:----| -squid|python
v2 or v3|Connects to multiple squid servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [squid.chart.py](../collectors/python.d.plugin/squid)
configuration file: [python.d/squid.conf](../collectors/python.d.plugin/squid)| -squid|BASH
Shell Script|Connects to a squid server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [squid.chart.sh](../collectors/charts.d.plugin/squid)
configuration file: [charts.d/squid.conf](../collectors/charts.d.plugin/squid)| - - ---- - -### HTTP Accelerators - -application|language|notes| -:---------:|:------:|:----| -varnish|python
v2 or v3|Uses the varnishstat command to provide varnish cache statistics (client metrics, cache performance, thread-related metrics, backend health, memory usage, etc.).&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [varnish.chart.py](../collectors/python.d.plugin/varnish)
configuration file: [python.d/varnish.conf](../collectors/python.d.plugin/varnish)| - - ---- - -### Search Engines - -application|language|notes| -:---------:|:------:|:----| -elasticsearch|python
v2 or v3|Monitors elasticsearch performance and health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [elasticsearch.chart.py](../collectors/python.d.plugin/elasticsearch)
configuration file: [python.d/elasticsearch.conf](../collectors/python.d.plugin/elasticsearch)| - - ---- - -### Name Servers - -application|language|notes| -:---------:|:------:|:----| -named|node.js|Connects to multiple named (ISC-Bind) servers (local or remote) to collect real-time performance metrics. All versions of bind after 9.9.10 are supported.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [named.node.js](../collectors/node.d.plugin/named)
configuration file: [node.d/named.conf](../collectors/node.d.plugin/named)| -bind_rndc|python
v2 or v3|Parses the `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [bind_rndc.chart.py](../collectors/python.d.plugin/bind_rndc)
configuration file: [python.d/bind_rndc.conf](../collectors/python.d.plugin/bind_rndc)| -nsd|python
v2 or v3|Charts the nsd received queries and zones.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nsd.chart.py](../collectors/python.d.plugin/nsd)
configuration file: [python.d/nsd.conf](../collectors/python.d.plugin/nsd) -powerdns|python
v2 or v3|Monitors powerdns performance and health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [powerdns.chart.py](../collectors/python.d.plugin/powerdns)
configuration file: [python.d/powerdns.conf](../collectors/python.d.plugin/powerdns)| -dnsdist|python
v2 or v3|Monitors dnsdist performance and health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dnsdist.chart.py](../collectors/python.d.plugin/dnsdist)
configuration file: [python.d/dnsdist.conf](../collectors/python.d.plugin/dnsdist)| -unbound|python
v2 or v3|Monitors Unbound performance and resource usage metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [unbound.chart.py](../collectors/python.d.plugin/unbound)
configuration file: [python.d/unbound.conf](../collectors/python.d.plugin/unbound)| - - ---- - -### DHCP Servers - -application|language|notes| -:---------:|:------:|:----| -isc dhcp|python
v2 or v3|Monitors the lease database to show all active leases.&#13;
 
Python v2 requires package `python-ipaddress`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [isc-dhcpd.chart.py](../collectors/python.d.plugin/isc_dhcpd)
configuration file: [python.d/isc-dhcpd.conf](../collectors/python.d.plugin/isc_dhcpd)| - - ---- - -### Load Balancers - -application|language|notes| -:---------:|:------:|:----| -haproxy|python
v2 or v3|Monitors frontend, backend and health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [haproxy.chart.py](../collectors/python.d.plugin/haproxy)
configuration file: [python.d/haproxy.conf](../collectors/python.d.plugin/haproxy)| -traefik|python
v2 or v3|Connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [traefik.chart.py](../collectors/python.d.plugin/traefik)
configuration file: [python.d/traefik.conf](../collectors/python.d.plugin/traefik)| - ---- - -### Message Brokers - -application|language|notes| -:---------:|:------:|:----| -rabbitmq|python
v2 or v3|Monitors rabbitmq performance and health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [rabbitmq.chart.py](../collectors/python.d.plugin/rabbitmq)
configuration file: [python.d/rabbitmq.conf](../collectors/python.d.plugin/rabbitmq)| -beanstalkd|python
v2 or v3|Provides server and tube-level statistics.&#13;
 
Requires beanstalkc python package (`pip install beanstalkc` or install package `python-beanstalkc`, which also installs `python-yaml`).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [beanstalk.chart.py](../collectors/python.d.plugin/beanstalk)
configuration file: [python.d/beanstalk.conf](../collectors/python.d.plugin/beanstalk)| - - ---- - -### UPS - -application|language|notes| -:---------:|:------:|:----| -apcupsd|BASH
Shell Script|Connects to an apcupsd server to collect real-time statistics of an APC UPS.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [apcupsd.chart.sh](../collectors/charts.d.plugin/apcupsd)
configuration file: [charts.d/apcupsd.conf](../collectors/charts.d.plugin/apcupsd)| -nut|BASH
Shell Script|Connects to a nut server (upsd) to collect real-time UPS statistics.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [nut.chart.sh](../collectors/charts.d.plugin/nut)
configuration file: [charts.d/nut.conf](../collectors/charts.d.plugin/nut)| - - ---- - -### RAID - -application|language|notes| -:---------:|:------:|:----| -mdstat|python
v2 or v3|Parses `/proc/mdstat` to get md array health metrics.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mdstat.chart.py](../collectors/python.d.plugin/mdstat)
configuration file: [python.d/mdstat.conf](../collectors/python.d.plugin/mdstat)| -megacli|python
v2 or v3|Collects adapter, physical drives and battery stats.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [megacli.chart.py](../collectors/python.d.plugin/megacli)
configuration file: [python.d/megacli.conf](../collectors/python.d.plugin/megacli)| - ---- - -### Mail Servers - -application|language|notes| -:---------:|:------:|:----| -dovecot|python
v2 or v3|Connects to multiple dovecot servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dovecot.chart.py](../collectors/python.d.plugin/dovecot)
configuration file: [python.d/dovecot.conf](../collectors/python.d.plugin/dovecot)| -exim|python
v2 or v3|Charts the exim queue size.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [exim.chart.py](../collectors/python.d.plugin/exim)
configuration file: [python.d/exim.conf](../collectors/python.d.plugin/exim)| -exim|BASH
Shell Script|Charts the exim queue size.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [exim.chart.sh](../collectors/charts.d.plugin/exim)
configuration file: [charts.d/exim.conf](../collectors/charts.d.plugin/exim)| -postfix|python
v2 or v3|Charts the postfix queue size (supports multiple queues).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [postfix.chart.py](../collectors/python.d.plugin/postfix)
configuration file: [python.d/postfix.conf](../collectors/python.d.plugin/postfix)| -postfix|BASH
Shell Script|Charts the postfix queue size.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [postfix.chart.sh](../collectors/charts.d.plugin/postfix)
configuration file: [charts.d/postfix.conf](../collectors/charts.d.plugin/postfix)| - - ---- - -### File Servers - -application|language|notes| -:---------:|:------:|:----| -NFS Client|`C`|This is handled entirely by the netdata daemon.
 
Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfs]`. -NFS Server|`C`|This is handled entirely by the netdata daemon.
 
Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfsd]`. -samba|python
v2 or v3|Performance metrics of Samba SMB2 file sharing.
 
documentation page: [python.d.plugin module samba](../collectors/python.d.plugin/samba)
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [samba.chart.py](../collectors/python.d.plugin/samba)
configuration file: [python.d/samba.conf](../collectors/python.d.plugin/samba)| - - ---- - -### System - -application|language|notes| -:---------:|:------:|:----| -apps|C|`apps.plugin` collects resource usage statistics for all processes running in the system. It groups the entire process tree and reports dozens of metrics for CPU utilization, memory footprint, disk I/O, swap memory, network connections, open files and sockets, etc. It reports metrics for application groups, users and user groups.
 
[Documentation of `apps.plugin`](../collectors/apps.plugin/).
 
netdata plugin: [`apps_plugin.c`](../collectors/apps.plugin)
configuration file: [`apps_groups.conf`](../collectors/apps.plugin)| -cpu_apps|BASH
Shell Script|Collects the CPU utilization of select apps.

DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [cpu_apps.chart.sh](../collectors/charts.d.plugin/cpu_apps)
configuration file: [charts.d/cpu_apps.conf](../collectors/charts.d.plugin/cpu_apps)| -load_average|BASH
Shell Script|Collects the current system load average.

DEPRECATED IN FAVOR OF THE NETDATA INTERNAL ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [load_average.chart.sh](../collectors/charts.d.plugin/load_average)
configuration file: [charts.d/load_average.conf](../collectors/charts.d.plugin/load_average)| -mem_apps|BASH
Shell Script|Collects the memory footprint of select applications.

DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [mem_apps.chart.sh](../collectors/charts.d.plugin/mem_apps)
configuration file: [charts.d/mem_apps.conf](../collectors/charts.d.plugin/mem_apps)| - - ---- - -### Sensors - -application|language|notes| -:---------:|:------:|:----| -cpufreq|python
v2 or v3|Collects the current CPU frequency from `/sys/devices`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [cpufreq.chart.py](../collectors/python.d.plugin/cpufreq)
configuration file: [python.d/cpufreq.conf](../collectors/python.d.plugin/cpufreq)| -cpufreq|BASH
Shell Script|Collects current CPU frequency from `/sys/devices`.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [cpufreq.chart.sh](../collectors/charts.d.plugin/cpufreq)
configuration file: [charts.d/cpufreq.conf](../collectors/charts.d.plugin/cpufreq)| -IPMI|C|Collects temperatures, voltages, currents, power, fans and `SEL` events from IPMI using `libipmimonitoring`.
Check [Monitoring IPMI](../collectors/freeipmi.plugin/) for more information.&#13;
 
netdata plugin: [freeipmi.plugin](../collectors/freeipmi.plugin)
configuration file: none required - to enable it, compile/install netdata with `--enable-plugin-freeipmi`| -hddtemp|python
v2 or v3|Connects to multiple hddtemp servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [hddtemp.chart.py](../collectors/python.d.plugin/hddtemp)
configuration file: [python.d/hddtemp.conf](../collectors/python.d.plugin/hddtemp)| -hddtemp|BASH
Shell Script|Connects to a hddtemp server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [hddtemp.chart.sh](../collectors/charts.d.plugin/hddtemp)
configuration file: [charts.d/hddtemp.conf](../collectors/charts.d.plugin/hddtemp)| -sensors|BASH
Shell Script|Collects sensor values from files in `/sys`.&#13;

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [sensors.chart.sh](../collectors/charts.d.plugin/sensors)
configuration file: [charts.d/sensors.conf](../collectors/charts.d.plugin/sensors)| -sensors|python
v2 or v3|Uses `lm-sensors` to collect sensor data.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [sensors.chart.py](../collectors/python.d.plugin/sensors)
configuration file: [python.d/sensors.conf](../collectors/python.d.plugin/sensors)| -smartd_log|python
v2 or v3|Collects the S.M.A.R.T attributes from `smartd` log files.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [smartd_log.chart.py](../collectors/python.d.plugin/smartd_log)
configuration file: [python.d/smartd_log.conf](../collectors/python.d.plugin/smartd_log)| -w1sensor|python
v2 or v3|Collects data from connected 1-Wire sensors.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [w1sensor.chart.py](../collectors/python.d.plugin/w1sensor)
configuration file: [python.d/w1sensor.conf](../collectors/python.d.plugin/w1sensor)| - - ---- - -### Network - -application|language|notes| -:---------:|:------:|:----| -ap|BASH
Shell Script|Uses the `iw` command to provide statistics of wireless clients connected to a wireless access point running on this host (works well with `hostapd`).
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [ap.chart.sh](../collectors/charts.d.plugin/ap)
configuration file: [charts.d/ap.conf](../collectors/charts.d.plugin/ap)| -fping|C|Charts network latency statistics for any number of nodes, using the `fping` command. A recent (probably unreleased) version of fping is required. The plugin supplied can install it in `/usr/local`.
 
netdata plugin: [fping.plugin](../collectors/fping.plugin) (this is a shell wrapper to start fping - once fping is started, netdata and fping communicate directly - it can also install the right version of fping)
configuration file: [fping.conf](../collectors/fping.plugin)| -snmp|node.js|Connects to multiple snmp servers to collect real-time performance metrics.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [snmp.node.js](../collectors/node.d.plugin/snmp)
configuration file: [node.d/snmp.conf](../collectors/node.d.plugin/snmp)| -dns_query_time|python
v2 or v3|Provides DNS query time statistics.
 
Requires package `dnspython` (`pip install dnspython` or install package `python-dnspython`).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dns_query_time.chart.py](../collectors/python.d.plugin/dns_query_time)
configuration file: [python.d/dns_query_time.conf](../collectors/python.d.plugin/dns_query_time)| -http|python
v2 or v3|Monitors a generic web page for status code and returned HTML content. -port|python&#13;
v2 or v3|Checks a generic TCP port for availability and response time. - - ---- - -### Time Servers - -application|language|notes| -:---------:|:------:|:----| -chrony|python&#13;
v2 or v3|Uses the chronyc command to provide chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [chrony.chart.py](../collectors/python.d.plugin/chrony)
configuration file: [python.d/chrony.conf](../collectors/python.d.plugin/chrony)| -ntpd|python
v2 or v3|Connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optionally also peer variables (if enabled in the configuration).&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ntpd.chart.py](../collectors/python.d.plugin/ntpd)
configuration file: [python.d/ntpd.conf](../collectors/python.d.plugin/ntpd)| - - ---- - -### Security - -application|language|notes| -:---------:|:------:|:----| -freeradius|python
v2 or v3|Uses the radclient command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [freeradius.chart.py](../collectors/python.d.plugin/freeradius)
configuration file: [python.d/freeradius.conf](../collectors/python.d.plugin/freeradius)| -openvpn|python
v2 or v3|All data from openvpn-status.log in your dashboard!
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ovpn_status_log.chart.py](../collectors/python.d.plugin/ovpn_status_log)
configuration file: [python.d/ovpn_status_log.conf](../collectors/python.d.plugin/ovpn_status_log)| -fail2ban|python
v2 or v3|Monitors the fail2ban log file to show all bans for all active jails.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [fail2ban.chart.py](../collectors/python.d.plugin/fail2ban)
configuration file: [python.d/fail2ban.conf](../collectors/python.d.plugin/fail2ban)| - - ---- - -### Telephony Servers - -application|language|notes| -:---------:|:------:|:----| -opensips|BASH
Shell Script|Connects to an opensips server (local only) to collect real-time performance metrics.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [opensips.chart.sh](../collectors/charts.d.plugin/opensips)
configuration file: [charts.d/opensips.conf](../collectors/charts.d.plugin/opensips)| - - ---- - -### Go applications - -application|language|notes| -:---------:|:------:|:----| -go_expvar|python
v2 or v3|Parses metrics exposed by applications written in the Go programming language using the [expvar package](https://golang.org/pkg/expvar/).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [go_expvar.chart.py](../collectors/python.d.plugin/go_expvar)
configuration file: [python.d/go_expvar.conf](../collectors/python.d.plugin/go_expvar)
documentation: [Monitoring Go Applications](../collectors/python.d.plugin/go_expvar/)| - - ---- - -### Household Appliances - -application|language|notes| -:---------:|:------:|:----| -sma_webbox|node.js|Connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [sma_webbox.node.js](../collectors/node.d.plugin/sma_webbox)
configuration file: [node.d/sma_webbox.conf](../collectors/node.d.plugin/sma_webbox)| -fronius|node.js|Connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [fronius.node.js](../collectors/node.d.plugin/fronius)
configuration file: [node.d/fronius.conf](../collectors/node.d.plugin/fronius)| -stiebeleltron|node.js|Collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [stiebeleltron.node.js](../collectors/node.d.plugin/stiebeleltron)
configuration file: [node.d/stiebeleltron.conf](../collectors/node.d.plugin/stiebeleltron)| - - ---- - -### Java Processes - -application|language|notes| -:---------:|:------:|:----| -Spring Boot Application|java|Monitors running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in the Spring Boot library.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [springboot](../collectors/python.d.plugin/springboot)
configuration file: [python.d/springboot.conf](../collectors/python.d.plugin/springboot) - - ---- - -### Provisioning Systems - -application|language|notes| -:---------:|:------:|:----| -puppet|python
v2 or v3|Connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [puppet.chart.py](../collectors/python.d.plugin/puppet)
configuration file: [python.d/puppet.conf](../collectors/python.d.plugin/puppet)| - ---- - -### Game Servers - -application|language|notes| -:---------:|:------:|:----| -SpigotMC|Python
v2 or v3|Monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [spigotmc.chart.py](../collectors/python.d.plugin/spigotmc)
configuration file: [python.d/spigotmc.conf](../collectors/python.d.plugin/spigotmc)| - ---- - -### Distributed Computing Clients - -application|language|notes| -:---------:|:------:|:----| -BOINC|Python
v2 or v3|Monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions. Requires manual configuration.&#13;
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [boinc.chart.py](../collectors/python.d.plugin/boinc)
configuration file: [python.d/boinc.conf](../collectors/python.d.plugin/boinc)| - ---- - -### Skeleton Plugins - -application|language|notes| -:---------:|:------:|:----| -example|BASH
Shell Script|Skeleton plugin in BASH.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell-scripting plugins.&#13;
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [example.chart.sh](../collectors/charts.d.plugin/example)
configuration file: [charts.d/example.conf](../collectors/charts.d.plugin/example)| -example|python
v2 or v3|Skeleton plugin in Python.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [example.chart.py](../collectors/python.d.plugin/example)
configuration file: [python.d/example.conf](../collectors/python.d.plugin/example)| diff --git a/doc/Demo-Sites.md b/doc/Demo-Sites.md deleted file mode 100644 index c9e0594ba..000000000 --- a/doc/Demo-Sites.md +++ /dev/null @@ -1,19 +0,0 @@ -# Demo Sites - -Live demo installations of netdata are available at **[https://my-netdata.io](https://my-netdata.io)**: - -Location | netdata demo URL | 60 mins reqs | VM Donated by -:-------:|:-----------------:|:----------:|:------------- -London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**
(this is the global netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**
(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/) -Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com) -Roubaix (France)|**[ventureer.my-netdata.io](https://ventureer.my-netdata.io)**|[![Requests Per Second](https://ventureer.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://ventureer.my-netdata.io)|[Ventureer.com](https://ventureer.com/) -Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/) -Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -New York (USA)|**[newyork.my-netdata.io](https://newyork.my-netdata.io)**|[![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -San Francisco (USA)|**[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)**|[![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -Singapore|**[singapore.my-netdata.io](https://singapore.my-netdata.io)**|[![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) -Toronto (Canada)|**[toronto.my-netdata.io](https://toronto.my-netdata.io)**|[![Requests Per 
Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) - -*Netdata dashboards are mobile and touch friendly.* diff --git a/doc/Donations-netdata-has-received.md b/doc/Donations-netdata-has-received.md deleted file mode 100644 index 53cff3864..000000000 --- a/doc/Donations-netdata-has-received.md +++ /dev/null @@ -1,23 +0,0 @@ -# Donations received - -This is a list of the donations we have received for netdata (sorted alphabetically by name): - -what donated|related links|who donated|description of the donation -----:|:-----:|:---:|:----------- -Packages Distribution|-|**[PackageCloud.io](https://packagecloud.io/)**|**PackageCloud.io** donated a free open-source subscription to their awesome Package Distribution services. -Cross Browser Testing|-|**[BrowserStack.com](https://www.browserstack.com/)**|**BrowserStack.com** donated a free subscription to their awesome Browser Testing services (all three of them: Live, Screenshots, Responsive). -Cloud VM|[cdn77.my-netdata.io](http://cdn77.my-netdata.io)|**[CDN77.com](https://www.cdn77.com/)**|**CDN77.com** donated a VM with 2 CPU cores, 4GB RAM and 20GB HD on their excellent CDN network. -Localization Management|[netdata localization project](https://crowdin.com/project/netdata) (check issue [#279](https://github.com/netdata/netdata/issues/279))|**[Crowdin.com](https://crowdin.com/)**|**Crowdin.com** donated an open source license for their Localization Management Platform. -Cloud VMs|[london.my-netdata.io](https://london.my-netdata.io) (Several VMs)|**[DigitalOcean.com](https://www.digitalocean.com/)**|**DigitalOcean.com** donated 1000 USD to be used in their excellent Cloud Computing services. Many thanks to [Justin Paine](https://github.com/xxdesmus) for making this happen. -Development IDE|-|**[JetBrains.com](https://www.jetbrains.com/)**|**JetBrains.com** donated an open source license for 4 developers for 1 year to their excellent IDEs. -Cloud VM|[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)|**[OctopusCS.com](https://octopuscs.com/)**|**OctopusCS.com** donated a VM with 4 CPU cores, 16GB RAM and 50GB HD in their excellent Cloud Computing services. -Cloud VM|[ventureer.my-netdata.io](https://ventureer.my-netdata.io)|**[Ventureer.com](https://ventureer.com/)**|**Ventureer.com** donated a VM with 4 CPU cores, 8GB RAM and 50GB HD in their excellent Cloud Computing services. -Cloud VM|[stackscale.my-netdata.io](https://stackscale.my-netdata.io)|**[stackscale.com](https://www.stackscale.com/)**|**StackScale.com** donated a VM with 4 CPU cores, 16GB RAM and 100GB HD in their excellent Cloud Computing services. - -Thank you! - ---- - -**Do you want to donate?** We are thirsty for on-line services that can help us make netdata better. We also try to build a network of demo sites (VMs) that can help us show the full potential of netdata. - -Please contact me at costa@tsaousis.gr. diff --git a/doc/Netdata-Security-and-Disclosure-Information.md b/doc/Netdata-Security-and-Disclosure-Information.md deleted file mode 100644 index 86adfeeb9..000000000 --- a/doc/Netdata-Security-and-Disclosure-Information.md +++ /dev/null @@ -1,37 +0,0 @@ -# Netdata Security and Disclosure Information - -This page describes netdata security and disclosure information.
-
-## Security Announcements
-
-Every time a security issue is fixed in netdata, we immediately release a new version of it. So, to get notified of all security incidents, please subscribe to our releases on github.
-
-## Report a Vulnerability
-
-We’re extremely grateful for security researchers and users that report vulnerabilities to the Netdata Open Source Community. All reports are thoroughly investigated by a set of community volunteers.
-
-To make a report, please email the private [security@netdata.cloud](mailto:security@netdata.cloud) list with the security details and the details expected for [all netdata bug reports](../.github/ISSUE_TEMPLATE/bug_report.md).
-
-## When Should I Report a Vulnerability?
-
-- You think you discovered a potential security vulnerability in Netdata
-- You are unsure how a vulnerability affects Netdata
-- You think you discovered a vulnerability in another project that Netdata depends on (e.g. python, node, etc)
-
-### When Should I NOT Report a Vulnerability?
-
-- You need help tuning Netdata for security
-- You need help applying security related updates
-- Your issue is not security related
-
-## Security Vulnerability Response
-
-Each report is acknowledged and analyzed by Netdata Team members within 3 working days. This will set off a Security Release Process.
-
-Any vulnerability information shared with the Netdata Team stays within the Netdata project and will not be disseminated to other projects unless it is necessary to get the issue fixed.
-
-As the security issue moves from triage, to identified fix, to release planning, we will keep the reporter updated.
-
-## Public Disclosure Timing
-
-A public disclosure date is negotiated by the Netdata team and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure ranges from immediate (especially if the bug is already publicly known) to a few weeks. As a basic default, we expect the time from report to disclosure to be on the order of 7 days. The Netdata team holds the final say when setting a disclosure date.
diff --git a/doc/Performance.md b/doc/Performance.md
deleted file mode 100644
index ef15a871a..000000000
--- a/doc/Performance.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Netdata Performance
-
-Netdata performance is affected by:
-
-**Data collection**
-- the number of charts for which data are collected
-- the number of plugins running
-- the technology of the plugins (e.g. BASH plugins are slower than binary plugins)
-- the frequency of data collection
-
-You can control all the above.
-
-**Web clients accessing the data**
-- the duration of the charts in the dashboard
-- the number of chart refreshes requested
-- the compression level of the web responses
-
----
-
-## Netdata Daemon
-
-For most server systems, with a few hundred charts and a few thousand dimensions, the netdata daemon, without any web clients accessing it, should not use more than 1% of a single core.
-
-For proof of netdata's scalability, check issue [#1323](https://github.com/netdata/netdata/issues/1323#issuecomment-265501668), where netdata collects 95,000 metrics per second with just 12% CPU utilization of a single core!
-
-In embedded systems, if the netdata daemon is using a lot of CPU without any web clients accessing it, you should lower the data collection frequency.
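-
-For example, a minimal sketch of `/etc/netdata/netdata.conf` (assuming the rest of the file keeps its defaults) that collects everything once every 2 seconds instead of every second:
-
-```
-[global]
-    update every = 2
-```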
-
-Here `update every` is the frequency, in seconds, at which data are collected for all charts: a higher number of seconds means a lower frequency (the default is 1, for per-second data collection). You can also set this frequency per module or chart. Check the **[[Configuration]]** section.
-
-## Plugins
-
-If a plugin is using a lot of CPU, you should lower its update frequency, or, if you wrote it, re-factor it to be more CPU efficient. Check **[[External Plugins]]** for more details on writing plugins.
-
-## CPU consumption when web clients are accessing dashboards
-
-Netdata is very efficient when servicing web clients. On most server platforms, netdata should be able to serve **1800 web client requests per second per core** for auto-refreshing charts.
-
-Normally, each user connected will request less than 10 chart refreshes per second (the page may have hundreds of charts, but only the visible ones are refreshed). So you can expect 180 users per CPU core accessing dashboards before seeing any delays.
-
-Netdata runs with the lowest possible process priority, so even if 1000 users are accessing dashboards, it should not influence your applications. CPU utilization will reach 100%, but your applications should get all the CPU they need.
-
-To lower the CPU utilization of netdata when clients are accessing the dashboard, set `web compression level = 1`, or disable web compression completely by setting `enable web responses gzip compression = no`. Both settings are in the `[web]` section.
-
-
-## Monitoring a heavily loaded system
-
-Netdata, while running, does not depend on disk I/O (apart from its log files; even `access.log` is written with buffering enabled, and it can be disabled). Some plugins that need the disk may stop and show gaps during heavy system load, but the netdata daemon itself should be able to work, collect values from `/proc` and `/sys` and serve the web clients accessing it.
-
-Keep in mind that netdata saves its database when it exits and loads it back when restarted. While it is running though, its DB is only stored in RAM and no I/O takes place for it.
-
-
-## Running netdata in embedded devices
-
-Embedded devices usually have very limited CPU resources available, and in most cases, just a single core.
-
-We suggest the following:
-
-#### external plugins
-
- - `charts.d.plugin` and `apps.plugin` each consume about twice the CPU resources of the netdata daemon.
- - If you don't need them, disable them (edit `/etc/netdata/netdata.conf` and search for the plugins section).
- - If you need them, increase their `update every` value (again in `/etc/netdata/netdata.conf`), so that they do not run that frequently.
-
-#### internal plugins
-
-If netdata is still using a lot of CPU, lower its update frequency. Going from per-second updates to updates once every 2 seconds will cut the CPU resources of all netdata programs **in half**, and you will still have very frequent updates.
-
-If the CPU of the embedded device is too weak, try setting an even lower update frequency. Experiment with `update every = 5` or `update every = 10` (higher number = lower frequency), until you get acceptable results.
-
-#### Single threaded web server
-
-Normally, netdata spawns a thread for each web client. This allows netdata to utilize all the available cores for servicing chart refreshes. You can, however, disable this feature and serve all charts one after another, using a single thread / core.
-This might lower the CPU pressure on the embedded device. To enable the single threaded web server, edit `/etc/netdata/netdata.conf` and set `mode = single-threaded` in the `[web]` section.
-
diff --git a/doc/Running-behind-apache.md b/doc/Running-behind-apache.md
deleted file mode 100644
index 02d2be92f..000000000
--- a/doc/Running-behind-apache.md
+++ /dev/null
@@ -1,268 +0,0 @@
-# netdata via apache's mod_proxy
-
-Below you can find instructions for configuring an apache server to:
-
-1. proxy a single netdata via an HTTP and HTTPS virtual host
-2. dynamically proxy any number of netdata servers
-3. add user authentication
-4. adjust netdata settings to get optimal results
-
-
-## Requirements
-
-Make sure your apache has `mod_proxy` and `mod_proxy_http` installed.
-
-On debian/ubuntu systems, install them with this:
-
-```sh
-sudo apt-get install libapache2-mod-proxy-html
-```
-
-Also make sure they are enabled:
-
-```
-sudo a2enmod proxy
-sudo a2enmod proxy_http
-```
-
-Ensure your rewrite module is enabled:
-
-```
-sudo a2enmod rewrite
-```
-
----
-
-## netdata on an existing virtual host
-
-On any **existing** and already **working** apache virtual host, you can redirect requests for URL `/netdata/` to one or more netdata servers.
-
-### proxy one netdata, running on the same server apache runs
-
-Add the following on top of any existing virtual host. It will allow you to access netdata as `http://virtual.host/netdata/`.
-
-```
-<VirtualHost *:80>
-
-    RewriteEngine On
-    ProxyRequests Off
-    ProxyPreserveHost On
-
-    <Proxy *>
-        Require all granted
-    </Proxy>
-
-    # Local netdata server accessed with '/netdata/', at localhost:19999
-    ProxyPass "/netdata/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
-    ProxyPassReverse "/netdata/" "http://localhost:19999/"
-
-    # if the user did not give the trailing /, add it
-    # for HTTP (if the virtualhost is HTTP, use this)
-    RewriteRule ^/netdata$ http://%{HTTP_HOST}/netdata/ [L,R=301]
-    # for HTTPS (if the virtualhost is HTTPS, use this)
-    #RewriteRule ^/netdata$ https://%{HTTP_HOST}/netdata/ [L,R=301]
-
-    # rest of virtual host config here
-
-</VirtualHost>
-```
-
-### proxy multiple netdata servers
-
-Add the following on top of any existing virtual host. It will allow you to access multiple netdata servers as `http://virtual.host/netdata/HOSTNAME/`, where `HOSTNAME` is the hostname of any other netdata server you have (to access the `localhost` netdata, use `http://virtual.host/netdata/localhost/`).
-
-```
-<VirtualHost *:80>
-
-    RewriteEngine On
-    ProxyRequests Off
-    ProxyPreserveHost On
-
-    <Proxy *>
-        Require all granted
-    </Proxy>
-
-    # proxy any host, on port 19999
-    ProxyPassMatch "^/netdata/([A-Za-z0-9\._-]+)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on
-
-    # make sure the user did not forget to add a trailing /
-    # for HTTP (if the virtualhost is HTTP, use this)
-    RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" http://%{HTTP_HOST}/netdata/$1/ [L,R=301]
-    # for HTTPS (if the virtualhost is HTTPS, use this)
-    RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" https://%{HTTP_HOST}/netdata/$1/ [L,R=301]
-
-    # rest of virtual host config here
-
-</VirtualHost>
-```
-
-> IMPORTANT
-> The above config allows your apache users to connect to port 19999 on any server on your network.
-
-If you want to control the servers your users can connect to, replace the `ProxyPassMatch` line with the following. This allows only `server1`, `server2`, `server3` and `server4`.
-
-```
-    ProxyPassMatch "^/netdata/(server1|server2|server3|server4)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on
-```
-
-## netdata on a dedicated virtual host
-
-You can proxy netdata through apache, using a dedicated apache virtual host.
-
-Create a new apache site:
-
-```sh
-nano /etc/apache2/sites-available/netdata.conf
-```
-
-with this content:
-
-```
-<VirtualHost *:80>
-    RewriteEngine On
-    ProxyRequests Off
-    ProxyPreserveHost On
-
-    ServerName netdata.domain.tld
-
-    <Proxy *>
-        Require all granted
-    </Proxy>
-
-    ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
-    ProxyPassReverse "/" "http://localhost:19999/"
-
-    ErrorLog ${APACHE_LOG_DIR}/netdata-error.log
-    CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined
-</VirtualHost>
-```
-
-Enable the VirtualHost:
-
-```sh
-sudo a2ensite netdata.conf && service apache2 reload
-```
-
-## Netdata proxy in Plesk
-_Assuming the main goal is to make Netdata run over HTTPS._
-1. Make a subdomain for Netdata on which you enable and force HTTPS - you can use a free Let's Encrypt certificate
-2. Go to "Apache & nginx Settings", and in the following section, add:
-```
-RewriteEngine on
-RewriteRule (.*) http://localhost:19999/$1 [P,L]
-```
-3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP; it just works.
-
-Repeat the operation for as many servers as you need.
-
-
-## Enable Basic Auth
-
-If you wish to add authentication (user/password) to access your netdata, do the following:
-
-Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get install apache2-utils`.
-
-Then, generate a password for the user `netdata`, using `htpasswd -c /etc/apache2/.htpasswd netdata`
-
-Modify the virtual host with these:
-
-```
-    # replace the <Proxy *> section
-    <Proxy *>
-        Order deny,allow
-        Allow from all
-    </Proxy>
-
-    # add a <Location /netdata/> section
-    <Location /netdata/>
-        AuthType Basic
-        AuthName "Protected site"
-        AuthUserFile /etc/apache2/.htpasswd
-        Require valid-user
-        Order deny,allow
-        Allow from all
-    </Location>
-```
-
-Specify `Location /` if netdata is running on a dedicated virtual host.
-
-Note: Changes are applied by reloading or restarting Apache.
-
-# Netdata configuration
-
-You might edit `/etc/netdata/netdata.conf` to optimize your setup a bit. To apply these changes, you need to restart netdata.
-
-## Response compression
-
-If you plan to use netdata exclusively via apache, you can gain some performance by preventing double compression of its output (netdata compresses its response, apache re-compresses it) by editing `/etc/netdata/netdata.conf` and setting:
-
-```
-[web]
-    enable gzip compression = no
-```
-
-Once you disable compression at netdata (and restart it), please verify you receive compressed responses from apache (it is important to receive compressed responses - the charts will be more snappy).
-
-## Limit direct access to netdata
-
-You would also need to instruct netdata to listen only on `localhost`, `127.0.0.1` or `::1`.
-
-```
-[web]
-    bind to = localhost
-```
-or
-```
-[web]
-    bind to = 127.0.0.1
-```
-or
-```
-[web]
-    bind to = ::1
-```
-
----
-
-You can also use a unix domain socket.
-This will also provide a faster route between apache and netdata:
-
-```
-[web]
-    bind to = unix:/tmp/netdata.sock
-```
-_note: netdata v1.8+ supports unix domain sockets_
-
-On the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netdata.sock|`, like this:
-
-```
-ProxyPass "/netdata/" "unix:/tmp/netdata.sock|http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
-```
-
----
-
-If your apache server is not on localhost, you can set:
-
-```
-[web]
-    bind to = *
-    allow connections from = IP_OF_APACHE_SERVER
-```
-_note: netdata v1.9+ supports `allow connections from`_
-
-`allow connections from` accepts [netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.
-
-## prevent the double access.log
-
-apache logs accesses and netdata logs them too. You can prevent netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:
-
-```
-[global]
-    access log = none
-```
-
-## Troubleshooting mod_proxy
-
-Make sure the requests reach netdata, by examining `/var/log/netdata/access.log`.
-
-1. if the requests do not reach netdata, your apache does not forward them.
-2. if the requests reach netdata but the URLs are wrong, you have not re-written them properly.
diff --git a/doc/Running-behind-caddy.md b/doc/Running-behind-caddy.md
deleted file mode 100644
index 2fc3fd634..000000000
--- a/doc/Running-behind-caddy.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# netdata via Caddy
-
-To run netdata via [Caddy's proxying](https://caddyserver.com/docs/proxy), set your Caddyfile up like this:
-
-```
-netdata.domain.tld {
-    proxy / localhost:19999
-}
-```
-
-Other directives can be added between the curly brackets as needed.
-
-To run netdata in a subfolder:
-
-```
-netdata.domain.tld {
-    proxy /netdata/ localhost:19999 {
-        without /netdata
-    }
-}
-```
-
-## limit direct access to netdata
-
-You would also need to instruct netdata to listen only to `127.0.0.1` or `::1`.
-
-To limit access to netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
diff --git a/doc/Running-behind-lighttpd.md b/doc/Running-behind-lighttpd.md
deleted file mode 100644
index 17fb9c629..000000000
--- a/doc/Running-behind-lighttpd.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# lighttpd v1.4.x
-
-Here is a config for accessing netdata in a suburl via lighttpd 1.4.46 and newer:
-
-```txt
-$HTTP["url"] =~ "^/netdata/" {
-    proxy.server = ( "" => ("netdata" => ( "host" => "127.0.0.1", "port" => 19999 )))
-    proxy.header = ( "map-urlpath" => ( "/netdata/" => "/") )
-}
-```
-
-If you have an older lighttpd, you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another).
-
-```txt
-$HTTP["url"] =~ "^/netdata/" {
-    proxy.server = ( "" => ("" => ( "host" => "127.0.0.1", "port" => 19998 )))
-}
-
-$SERVER["socket"] == ":19998" {
-    url.rewrite-once = ( "^/netdata(.*)$" => "/$1" )
-    proxy.server = ( "" => ( "" => ( "host" => "127.0.0.1", "port" => 19999 )))
-}
-```
-
----
-
-If the only thing the server is exposing via the web is netdata (and thus no suburl rewriting is required),
-then you can get away with just
-```
-proxy.server = ( "" => ( ( "host" => "127.0.0.1", "port" => 19999 )))
-```
-Though if it's public facing, you might then want to put some authentication on it. htdigest support
-looks like:
-```
-auth.backend = "htdigest"
-auth.backend.htdigest.userfile = "/etc/lighttpd/lighttpd.htdigest"
-auth.require = ( "" => ( "method" => "digest",
-                         "realm" => "netdata",
-                         "require" => "valid-user"
-                       )
-                )
-```
-other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth).
-
----
-
-It seems that lighttpd (or some versions of it) fails to proxy compressed web responses.
-To solve this issue, disable web response compression in netdata.
-
-Open /etc/netdata/netdata.conf and set this in [global]:
-
-```
-enable web responses gzip compression = no
-```
-
-## limit direct access to netdata
-
-You would also need to instruct netdata to listen only to `127.0.0.1` or `::1`.
-
-To limit access to netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`.
diff --git a/doc/Running-behind-nginx.md b/doc/Running-behind-nginx.md
deleted file mode 100644
index 76062e035..000000000
--- a/doc/Running-behind-nginx.md
+++ /dev/null
@@ -1,202 +0,0 @@
-# netdata via nginx
-
-To pass netdata via nginx, use this:
-
-### As a virtual host
-
-```
-upstream backend {
-    # the netdata server
-    server 127.0.0.1:19999;
-    keepalive 64;
-}
-
-server {
-    # nginx listens to this
-    listen 80;
-
-    # the virtual host name of this
-    server_name netdata.example.com;
-
-    location / {
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Forwarded-Server $host;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_pass http://backend;
-        proxy_http_version 1.1;
-        proxy_pass_request_headers on;
-        proxy_set_header Connection "keep-alive";
-        proxy_store off;
-    }
-}
-```
-
-### As a subfolder to an existing virtual host
-
-```
-upstream netdata {
-    server 127.0.0.1:19999;
-    keepalive 64;
-}
-
-server {
-    listen 80;
-
-    # the virtual host name of this subfolder should be exposed
-    #server_name netdata.example.com;
-
-    location = /netdata {
-        return 301 /netdata/;
-    }
-
-    location ~ /netdata/(?<ndpath>.*) {
-        proxy_redirect off;
-        proxy_set_header Host $host;
-
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Forwarded-Server $host;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_http_version 1.1;
-        proxy_pass_request_headers on;
-        proxy_set_header Connection "keep-alive";
-        proxy_store off;
-        proxy_pass http://netdata/$ndpath$is_args$args;
-
-        gzip on;
-        gzip_proxied any;
-        gzip_types *;
-    }
-}
-```
-
-### As a subfolder for multiple netdata servers, via one nginx
-
-```
-upstream backend-server1 {
-    server 10.1.1.103:19999;
-    keepalive 64;
-}
-upstream backend-server2 {
-    server 10.1.1.104:19999;
-    keepalive 64;
-}
-
-server {
-    listen 80;
-
-    # the virtual host name of this subfolder should be exposed
-    #server_name netdata.example.com;
-
-    location ~ /netdata/(?<behost>.*)/(?<ndpath>.*) {
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Forwarded-Server $host;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_http_version 1.1;
-        proxy_pass_request_headers on;
-        proxy_set_header Connection "keep-alive";
-        proxy_store off;
-        proxy_pass http://backend-$behost/$ndpath$is_args$args;
-
-        gzip on;
-        gzip_proxied any;
-        gzip_types *;
-    }
-
-    # make sure there is a trailing slash at the browser
-    # or the URLs will be wrong
-    location ~ /netdata/(?<behost>.*) {
-        return 301 /netdata/$behost/;
-    }
-}
-```
-
-Of course you can add as many backend servers as you like.
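-
-For example (the address below is hypothetical), each extra server needs just one more `upstream` block; the regex `location` above picks it up automatically:
-
-```
-upstream backend-server3 {
-    server 10.1.1.105:19999;
-    keepalive 64;
-}
-```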
-
-Using the above, you access netdata on the backend servers like this:
-
-- `http://nginx.server/netdata/server1/` to reach `backend-server1`
-- `http://nginx.server/netdata/server2/` to reach `backend-server2`
-
-
-### Enable authentication
-
-Create an authentication file to enable nginx basic authentication.
-Do not use authentication without SSL/TLS!
-If you don't have one, you can do the following:
-
-```
-printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords
-```
-
-And enable the authentication inside your server directive:
-
-```
-server {
-    # ...
-    auth_basic "Protected";
-    auth_basic_user_file passwords;
-    # ...
-}
-```
-
-## limit direct access to netdata
-
-If your nginx is on `localhost`, you can use this to protect your netdata:
-
-```
-[web]
-    bind to = 127.0.0.1 ::1
-```
-
----
-
-You can also use a unix domain socket. This will also provide a faster route between nginx and netdata:
-
-```
-[web]
-    bind to = unix:/tmp/netdata.sock
-```
-_note: netdata v1.8+ supports unix domain sockets_
-
-On the nginx side, use something like this to use the same unix domain socket:
-
-```
-upstream backend {
-    server unix:/tmp/netdata.sock;
-    keepalive 64;
-}
-```
-
----
-
-If your nginx server is not on localhost, you can set:
-
-```
-[web]
-    bind to = *
-    allow connections from = IP_OF_NGINX_SERVER
-```
-
-_note: netdata v1.9+ supports `allow connections from`_
-
-`allow connections from` accepts [netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address.
-
-## prevent the double access.log
-
-nginx logs accesses and netdata logs them too. You can prevent netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`:
-
-```
-[global]
-    access log = none
-```
-
-## SELinux
-
-If you get a 502 Bad Gateway error, you might check your nginx error log:
-
-```sh
-# cat /var/log/nginx/error.log:
-2016/09/09 12:34:05 [crit] 5731#5731: *1 connect() to 127.0.0.1:19999 failed (13: Permission denied) while connecting to upstream, client: 1.2.3.4, server: netdata.example.com, request: "GET / HTTP/2.0", upstream: "http://127.0.0.1:19999/", host: "netdata.example.com"
-```
-
-If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`.
diff --git a/doc/Third-Party-Plugins.md b/doc/Third-Party-Plugins.md
deleted file mode 100644
index d50aa417d..000000000
--- a/doc/Third-Party-Plugins.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Third-party Plugins
-
-The following is a list of Netdata plugins distributed by third parties:
-
-## Nvidia GPUs
-
-[netdata nv plugin](https://github.com/coraxx/netdata_nv_plugin) monitors nvidia GPUs.
-
-![image](https://user-images.githubusercontent.com/2662304/29516895-351e905e-867b-11e7-9863-3fb6924490ab.png)
-
-## teamspeak 3
-
-[teamspeak 3 plugin](https://github.com/coraxx/netdata_ts3_plugin) polls active users and bandwidth from TeamSpeak 3 servers.
-
-## SSH
-
-[SSH module](https://github.com/Yaser-Amiri/netdata-ssh-module) monitors failed authentication requests of an SSH server.
-
-## interactive users count
-
-Collects the [number of currently logged-on users](https://github.com/veksh/netdata-numsessions).
-
-## CyberPower UPS
-
-[cyberups plugin](https://github.com/HawtDogFlvrWtr/netdata_cyberpwrups_plugin) polls the USB-connected CyberPower UPS for stats.
-
-## Nim
-
-There is an unofficial [nim plugin helper](https://github.com/FedericoCeratto/nim-netdata-plugin).
diff --git a/doc/Why-Netdata.md b/doc/Why-Netdata.md
deleted file mode 100644
index 57ff722ec..000000000
--- a/doc/Why-Netdata.md
+++ /dev/null
@@ -1,170 +0,0 @@
-# Why Netdata
-
-![image8](https://cloud.githubusercontent.com/assets/2662304/14253735/536f4580-fa95-11e5-9f7b-99112b31a5d7.gif)
-
-## Netdata is unique!
-
-The following is an animated GIF showing **netdata**'s ability to monitor QoS. The timings of this animation have not been altered; this is the real thing:
-
-![animation5](https://cloud.githubusercontent.com/assets/2662304/12373715/0da509d8-bc8b-11e5-85cf-39d5234bf976.gif)
-
-Check the details on this animation:
-
-1. At the beginning the charts auto-refresh, in real-time
-2. Charts can be dragged and zoomed (with either mouse or touch)
-3. You pan or zoom one, the others follow
-4. Mouse over on one, and the same timestamp is selected on all
-5. Dimensions can be enabled or disabled
-6. All refreshes are instant (an 8-year-old Core 2 Duo computer was used to record this)
-
-There are a lot of excellent open source tools for collecting and visualizing performance metrics. Check for example [collectd](https://collectd.org/), [OpenTSDB](http://opentsdb.net/), [influxdb](https://influxdata.com/), [Grafana](http://grafana.org/), etc.
-
-So, why **netdata**?
-
-Well, **netdata** has a quite different approach.
-
-## Simplicity
-
-> Most monitoring solutions require endless configuration of whatever imaginable. Well, this is a linux box. Why do we need to configure every single metric we need to monitor? Of course it has a CPU and RAM and a few disks, and ethernet ports, it might run a firewall, a web server, or a database server and so on. Why do we need to configure all these metrics?
-
-**Netdata** has been designed to auto-detect everything. Of course you can enable, tweak or disable things. But by default, if **netdata** can retrieve `/server-status` from a web server you run on your linux box, it will automatically collect all performance metrics. This happens for apache, squid, nginx, mysql, opensips, etc. It will also automatically collect all available system values for CPU, memory, disks, network interfaces, QoS (with labels if you also use [FireQOS](http://firehol.org)), etc. Even for applications that do not offer performance metrics, it will automatically group the whole process tree and provide metrics like CPU usage, memory allocated, opened files, sockets, disk activity, swap activity, etc per application group.
-
-Netdata supports plenty of [configuration](../daemon/config/). However, we have done everything we can to allow netdata to auto-detect as much as possible.
-
-Even netdata plugins are designed to support configuration-less operation. So, you just install and run netdata. You will need to configure something only if it cannot be auto-detected.
-
-> Take any performance monitoring solution and try to troubleshoot a performance problem. At the end of the day you will have to ssh to the server to understand what exactly is happening. You will have to use `iostat`, `iotop`, `vmstat`, `top`, `iperf`, `ethtool` and probably a few dozen more console tools to figure it out.
-
-With **netdata**, this need is eliminated significantly. Of course you will ssh. Just not for monitoring performance.
-
-If you install **netdata** you will prefer it over the console tools. **Netdata** visualizes the data, while the console tools just show their values.
-The detail is the same - I have spent quite a lot of time reading the source code of the console tools, to figure out what needs to be done in netdata so that the data, the values, will be the same. Actually, **netdata** is more precise than most console tools; it will interpolate all collected values to second boundaries, so that even if something took a few microseconds more to be collected, netdata will correctly estimate the per second value (a sketch of this idea follows at the end of this section).
-
-**Netdata** visualizes data in ways you cannot even imagine on a console. It allows you to see the present in real-time, much like the console tools, but also the recent past, compare different metrics with each other, zoom in to see the recent past in detail, or zoom out to have a helicopter view of what is happening in longer durations, build custom dashboards with just the charts you need for a specific purpose.
-
-Most engineers that install netdata ssh to the server to tweak system or application settings, and at the same time they monitor the result of the new settings in **netdata** on their browser.
-
-## Per second data collection and visualization
-
-**Per second data collection and visualization** is usually only available in dedicated console tools, like `top`, `vmstat`, `iostat`, etc. Netdata brings per second data collection and visualization to all applications, accessible through the web.
-
-*You are not convinced per second data collection is important?*
-**Click** this image for a demo:
-
-[![image](https://cloud.githubusercontent.com/assets/2662304/12373555/abd56f04-bc85-11e5-9fa1-10aa3a4b648b.png)](http://netdata.firehol.org/demo2.html)
-
-## Realtime monitoring
-
-> Any performance monitoring solution that does not go down to per second collection and visualization of the data is useless. It will make you happy to have it, but it will not help you more than that.
-
-Visualizing the present in **real-time and in great detail** is the most important value a performance monitoring solution should provide. The next most important is the last hour, again per second. The next is the last 8 hours and so on, up to a week, or at most a month. In my 20+ years in IT, I needed just once or twice to look a year back. And this was mainly out of curiosity.
-
-Of course real-time monitoring requires resources. **netdata** is designed to be very efficient:
-
-1. collecting performance data is a repeating process - you do the same thing again and again. **Netdata** has been designed to learn from each iteration, so that the next one will be faster. It learns the sizes of files (it even keeps them open when it can), the number of lines and words per line they contain, the sizes of the buffers it needs to process them, etc. It adapts, so that everything will be as ready as possible for the next iteration.
-2. internally, it uses hashes and indexes (b-trees), to speed up lookups of metrics, charts, dimensions, settings.
-3. it has an in-memory round robin database based on a custom floating point number that allows it to pack values and flags together, in 32 bits, to lower its memory footprint.
-4. its internal web server is capable of generating JSON responses from live performance data at speeds comparable to static content delivery (it does not use `printf`; it is actually 11 times faster than `printf` at generating JSON).
-
-**Netdata** will use some CPU and memory, but it **will not produce any disk I/O at all**, apart from its logs (which you can disable if you like).
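-
-As a rough illustration of that interpolation (a hypothetical sketch in C, not netdata's actual code): if a collection that should have taken exactly one second actually took slightly longer, the counter delta is normalized over the real elapsed time:
-
-```c
-#include <stdio.h>
-
-/* normalize a counter delta to a per-second rate, even when
-   the collection loop fired slightly late */
-static double per_second_rate(unsigned long long delta,
-                              unsigned long long elapsed_usec) {
-    return (double)delta * 1000000.0 / (double)elapsed_usec;
-}
-
-int main(void) {
-    /* 1002 units collected over 1.002 seconds -> 1000.0 per second */
-    printf("%.1f\n", per_second_rate(1002, 1002000));
-    return 0;
-}
-```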
-
-Most servers should have plenty of CPU resources (I consider a hardware upgrade or an application split when a server averages around 40% CPU utilization at the peak hour). Even if a server has limited CPU resources available, you can just lower the data collection frequency of **netdata**. Going from per-second to every-2-seconds data collection will cut the **netdata** CPU requirements in half, and you will still get charts that are just 2 seconds behind.
-
-The same goes for memory. If you just keep an hour of data (which is perfect for performance troubleshooting), you will most probably need 15-20MB. You can also enable the kernel de-duper (Kernel Same-Page Merging) and **netdata** will offer to it all its round robin database. KSM can free 20-60% of the memory used by **netdata** (guess why: there are a lot of metrics that are always zero or just constant).
-
-When netdata runs on modern computers (even on CELERON processors), most chart queries are answered in less than 3 milliseconds! **Not seconds, MILLISECONDS!** Less than 3 milliseconds for calculating the chart, generating JSON text, compressing it and sending it to your web browser. Timings are logged in netdata's `access.log` for you to examine.
-
-Netdata is written in plain `C` and the key system plugins are written in `C` too. Its speed can only be compared to the native console system administration tools.
-
-You can also stress test your netdata installation by running the script `tests/stress.sh` found in the distribution. Most modern server hardware can serve more than 300 chart refreshes per second per core. A Raspberry Pi 2 can serve 300+ chart refreshes per second utilizing all of its 4 cores.
-
-
-## No disk I/O at all
-
-Netdata does not use any disk I/O, apart from its logs, and even these can be disabled.
-
-Netdata will use some memory (you size it, check [[Memory Requirements]]) and CPU (below 2% of a single core for the daemon; plugins may require more, check [[Performance]]), but normally your systems should have plenty of these resources available and spare.
-
-The design goal of **NO DISK I/O AT ALL** effectively means netdata will not disrupt your applications.
-
-## No root access
-
-You don't need to run netdata as root. If started as root, netdata will switch to the `netdata` user (or any other user given in its configuration or as a command line argument).
-
-There are a few plugins that need root access in order to collect values. These (and only these) are setuid to root.
-
-## Visualizes QoS
-
-Netdata visualizes `tc` QoS classes automatically. If you also use FireQOS, it will also collect interface and class names.
-
-Check this animated GIF (generated with [ScreenToGif](https://github.com/NickeManarin/ScreenToGif)):
-
-![animation5](https://cloud.githubusercontent.com/assets/2662304/12373715/0da509d8-bc8b-11e5-85cf-39d5234bf976.gif)
-
-## Embedded web server
-
-> Most solutions require dedicated servers to actually use the monitoring console. From my perspective, this is totally unneeded for performance monitoring. All of us have a spectacular tool on our desktops, that allows us to connect in real time to any server in the world: **the web browser**. It shouldn't be so hard to use the same tool to connect in real-time to all our servers.
-
-With **netdata**, there is no need to centralize anything for performance monitoring. You view everything directly from its source. No need to run something else to access netdata. Of course you can use a firewall, or a reverse proxy, to limit access to it.
-But for most systems, inside your DMZ, just running it will be enough.
-
-Still, with **netdata** you can build dashboards with charts from any number of servers. And these charts will be connected to each other much like the ones that come from the same server. You hover on one, and all of them show the relative value for the selected timestamp. You zoom or pan one, and all of them follow. **Netdata** achieves that because the logic that connects the charts together is at the browser, not the server, so that all charts presented on the same page are connected, no matter where they come from.
-
-## Performance monitoring, scaled properly
-
-"Properly"? What is "properly"?
-
-We know software solutions can **scale up** (i.e. you replace their resources with bigger ones), or **scale out** (i.e. you add more smaller resources to them). In both cases, to get more of it, you need to supply **more resources**.
-
-So, what is "scaled properly"?
-
-Traditionally, monitoring solutions centralize all metric data to provide unified dashboards across all servers. So, you install agents on all your servers to collect system and application metrics which are then sent to a central place for storage and processing. Depending on the solution you use, the central place can either **scale up** or **scale out** (or a mix of the two).
-
-"Scaled properly" is something completely different. "Scaled properly" minimizes the need for a "central place", so that **there is nothing to be scaled**!
-
-Wait a moment! You cannot take out the "central place" of a monitoring solution!
-
-Yes, we can! Well... most of it, but before explaining how, let's see what happens today:
-
-Monitoring solutions are a key component for any online service. These solutions usually consume a considerable amount of resources. This is true for both "scale-up" and "scale-out" solutions. These resources require maintenance and administration too. To balance the resources required, these monitoring solutions follow a few simple rules:
-
-1. The number of metrics collected per server is limited. They collect CPU, RAM, DISK, NETWORK metrics and a few application metrics.
-
-2. The data collection frequency of each metric is also very low, at best it is once every 10 or 15 seconds, at worst every 5 or 10 mins.
-
-Due to all the above, most centralized monitoring solutions are usually good for alarms and **statistics of past performance**. The alarms usually trigger every 1 to 5 minutes and you get a few low-resolution charts about the past performance of your servers.
-
-Well... there is something wrong in this approach! Can you see it?
-
-Let's see the netdata approach:
-
-1. Data collection happens **per second**. This allows true real-time performance monitoring.
-
-2. **Thousands of metrics** per server and application are collected, **every single second**. The number of metrics collected is not a problem.
-
-3. Data do not leave the server on which they are collected. Data are not centralized, so a huge central place that would process and store gazillions of data points is not needed.
-
-   > Ok, I hear a few of you complaining already - you will find out... patience...
-
-4. netdata does not use any DISK I/O while running (apart from its log files - and even these can be disabled) and netdata runs with the lowest possible process priority, so that **your applications will never be affected by it**.
-
-5. Each netdata is standalone. Your web browser connects directly to each server to present real-time dashboards.
-The charts are so snappy, so real-time, so fast that we can call netdata **a console killer for performance monitoring**.
-
-The charting libraries **netdata** uses are the fastest possible ([Dygraphs](http://dygraphs.com/) do make the difference!) and **netdata** respects browser resources. Data are just rendered on a canvas. No processing in javascript at all.
-
-6. netdata is very efficient: just 2% of a single core is required, and some RAM, and you can actually control how much of both you want to allocate to it.
-
-
-Server side, chart data generation scales pretty well. You can expect 400+ chart refreshes per second per core on modern hardware. For a page with 10 charts visible (the page may have hundreds, but only the visible ones are refreshed), just a tiny fraction of a single CPU core will be used for servicing them. Even these refreshes stop when you switch tabs on your browser, focus on another window, scroll to a part of the page without charts, or zoom or pan a chart. And of course the **netdata** server runs with the lowest possible process priority, so that your production environment, your applications, will not be slowed down by the netdata server.
-
-7. netdata dashboards can be multi-server (check: [http://my-netdata.io](http://my-netdata.io)) - your browser connects to each netdata server directly.
-
-So, using netdata, your monitoring infrastructure is embedded on each server, significantly limiting the need for additional resources. netdata is very resource efficient and utilizes server resources that already exist and are spare (on each server).
-
-Of course, there are a few issues that need to be addressed with this approach:
-
-1. We need an index of all the netdata installations we have
-2. We need a place to handle notifications and alarms
-3. We need a place to save statistics of past performance
-
-Our approach uses the netdata [registry](../registry/). The registry solves the problem of maintaining a list of all the netdata installations we have. It does this transparently, without any configuration. It tracks the netdata servers your web browser has visited and bookmarks them at the `my-netdata` menu.
-
-Every netdata can be a registry. You can use the global one we provide for free, or pick one of your netdata servers and turn it into a registry for your network.
diff --git a/doc/a-github-star-is-important.md b/doc/a-github-star-is-important.md
deleted file mode 100644
index c00fba300..000000000
--- a/doc/a-github-star-is-important.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# A GitHub star is important
-
-**GitHub stars** allow netdata to expand its reach and its community, and especially to attract people with skills willing to contribute to it.
-
-Compared to its first release, netdata is now **twice as fast**, has all its bugs settled and a lot more functionality. This happened because a lot of people find it useful, use it daily at home and work, **rely on it** and **contribute to it**.
-
-**GitHub stars** also **motivate** us. They state that you find our work **useful**. They give us strength to continue, to work **harder** to make it even **better**.
-
-So, give netdata a **GitHub star**, at the top right of this page.
-
-Thank you!
-
-Costa Tsaousis
diff --git a/doc/high-performance-netdata.md b/doc/high-performance-netdata.md
deleted file mode 100644
index 1671acab8..000000000
--- a/doc/high-performance-netdata.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# High performance netdata
-
-If you plan to run a public netdata on the internet, you will get the most performance out of it by following these rules:
-
-## 1. run behind nginx
-
-The internal web server is optimized to provide the best experience with few clients connected to it. Normally a web browser will make 4-6 concurrent connections to a web server, so that it can send requests in parallel. To best serve a single client, netdata spawns a thread for each connection it receives (so 4-6 threads per connected web browser).
-
-If you plan to have your netdata public on the internet, this strategy wastes resources. It provides a lock-free environment so that each thread is autonomous to serve the browser, but it does not scale well. When netdata runs behind nginx, idle connections to netdata can be reused, thus significantly improving the performance of netdata.
-
-In the following nginx configuration we do the following:
-
-- allow nginx to maintain up to 1024 idle connections to netdata (so netdata will have up to 1024 threads waiting for requests)
-
-- allow nginx to compress the responses of netdata (later we will disable gzip compression at netdata)
-
-- we block wordpress pingback attacks and allow only GET, HEAD and OPTIONS requests.
-
-```
-upstream backend {
-    server 127.0.0.1:19999;
-    keepalive 1024;
-}
-
-server {
-    listen *:80;
-    server_name my.web.server.name;
-
-    location / {
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Forwarded-Server $host;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_pass http://backend;
-        proxy_http_version 1.1;
-        proxy_pass_request_headers on;
-        proxy_set_header Connection "keep-alive";
-        proxy_store off;
-        gzip on;
-        gzip_proxied any;
-        gzip_types *;
-
-        # Block any HTTP requests other than GET, HEAD, and OPTIONS
-        limit_except GET HEAD OPTIONS {
-            deny all;
-        }
-    }
-
-    # WordPress Pingback Request Denial
-    if ($http_user_agent ~* "WordPress") {
-        return 403;
-    }
-
-}
-```
-
-Then edit `/etc/netdata/netdata.conf` and set these config options:
-
-```
-[global]
-    bind socket to IP = 127.0.0.1
-    access log = none
-    disconnect idle web clients after seconds = 3600
-    enable web responses gzip compression = no
-```
-
-These options:
-
-- `[global].bind socket to IP = 127.0.0.1` makes netdata listen only for requests from localhost (nginx).
-- `[global].access log = none` disables the access.log of netdata. It is not needed, since netdata only listens for requests on 127.0.0.1 and thus only nginx can access it. nginx has its own access.log for your record.
-- `[global].disconnect idle web clients after seconds = 3600` will kill inactive web threads after an hour of inactivity.
-- `[global].enable web responses gzip compression = no` disables gzip compression at netdata (nginx will compress the responses).
-
-## 2. increase open files limit (non-systemd)
-
-By default Linux limits open file descriptors per process to 1024. This means that less than half of this number of client connections can be accepted by both nginx and netdata. To increase them, create 2 new files:
-
-1. `/etc/security/limits.d/nginx.conf`, with these contents:
-
- ```
-nginx soft nofile 10000
-nginx hard nofile 30000
-```
-
-2. `/etc/security/limits.d/netdata.conf`, with these contents:
-
- ```
-netdata soft nofile 10000
-netdata hard nofile 30000
-```
-
-and to activate them, restart both services so that they pick up the new limits:
-
-```sh
-sudo service nginx restart
-sudo service netdata restart
-```
-
-## 2b. increase open files limit (systemd)
-
-Thanks to [@leleobhz](https://github.com/netdata/netdata/issues/655#issue-163932584), this is what you need to raise the limits using systemd:
-
-This is based on https://ma.ttias.be/increase-open-files-limit-in-mariadb-on-centos-7-with-systemd/ and worked here as follows:
-
-1. Create the folders in /etc:
-
- ```
-mkdir -p /etc/systemd/system/netdata.service.d
-mkdir -p /etc/systemd/system/nginx.service.d
-```
-
-2. Create limits.conf in each folder as follows:
-
- ```
-[Service]
-LimitNOFILE=30000
-```
-
-3. Reload the systemd daemon list and restart the services:
-
- ```sh
-systemctl daemon-reload
-systemctl restart netdata.service
-systemctl restart nginx.service
-```
-
-You can check the limits with the following commands:
-
-```sh
-cat /proc/$(ps aux | grep "nginx: master process" | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
-cat /proc/$(ps aux | grep "netdata" | head -n1 | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
-```
-
-View of the files:
-
-```sh
-# tree /etc/systemd/system/*service.d/etc/systemd/system/netdata.service.d
-/etc/systemd/system/netdata.service.d
-└── limits.conf
-/etc/systemd/system/nginx.service.d
-└── limits.conf
-
-0 directories, 2 files
-
-# cat /proc/$(ps aux | grep "nginx: master process" | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
-Max open files            30000                30000                files
-
-# cat /proc/$(ps aux | grep "netdata" | head -n1 | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
-Max open files            30000                30000                files
-
-```
diff --git a/doc/netdata-for-IoT.md b/doc/netdata-for-IoT.md
deleted file mode 100644
index ea7798722..000000000
--- a/doc/netdata-for-IoT.md
+++ /dev/null
@@ -1,199 +0,0 @@
-# Netdata for IoT
-
-![image1](https://cloud.githubusercontent.com/assets/2662304/14252446/11ae13c4-fa90-11e5-9d03-d93a3eb3317a.gif)
-
-> New to netdata? Check its demo: **[https://my-netdata.io/](https://my-netdata.io/)**
->
-> [![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
->
-> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
-
----
-
-netdata is a **very efficient** server performance monitoring solution. When running on server hardware, it can collect thousands of system and application metrics **per second** with just 1% CPU utilization of a single core. Its web server responds to most data requests in about **half a millisecond**, making its web dashboards spontaneous, amazingly fast!
-
-netdata can also be a very efficient real-time monitoring solution for **IoT devices** (RPIs, routers, media players, wifi access points, industrial controllers and sensors of all kinds). Netdata will generally run everywhere a Linux kernel runs (and it is glibc and [musl-libc](https://www.musl-libc.org/) friendly).
-
-You can use it as a data collection agent (where you pull data using its API), for embedding its charts on other web pages / consoles, or for accessing it directly with your browser to view its dashboard.
-
-The netdata web API already provides **reduce** functions allowing it to report **average** and **max** for any timeframe. It can also respond in many formats, including JSON, JSONP, CSV, HTML. Its API is also a **google charts** provider, so it can directly be used by google sheets, google charts, google widgets.
-
-![sensors](https://cloud.githubusercontent.com/assets/2662304/15339745/8be84540-1c8e-11e6-9e9a-106dea7539b6.gif)
-
-Although netdata has been significantly optimized to lower the CPU and RAM resources it consumes, the plethora of data collection plugins may be inappropriate for weak IoT devices.
-
-> keep in mind that netdata on RPi 2 and 3 does not require any tuning. The default settings will be good. The following tunables apply only when running netdata on RPi 1 or other very weak IoT devices.
-
-Here are a few tricks to control the resources consumed by netdata:
-
-## 1. Disable External plugins
-
-External plugins can consume more system resources than the netdata server. Disable the ones you don't need.
-
-Edit `/etc/netdata/netdata.conf`, find the `[plugins]` section:
-
-```
-[plugins]
-    proc = yes
-
-    tc = no
-    idlejitter = no
-    cgroups = no
-    checks = no
-    apps = no
-    charts.d = no
-    node.d = no
-
-    plugins directory = /usr/libexec/netdata/plugins.d
-    enable running new plugins = no
-    check for new plugins every = 60
-```
-
-In detail:
-
-plugin|description
-:---:|:---------
-`proc`|the internal plugin used to monitor the system. Normally, you don't want to disable this. You can disable individual functions of it in the next section.
-`tc`|monitoring network interface QoS (tc classes)
-`idlejitter`|internal plugin (written in C) that attempts to show if the system is starved for CPU. Disabling it will eliminate a thread.
-`cgroups`|monitoring linux containers. Most probably you are not going to need it. This will also eliminate another thread.
-`checks`|a debugging plugin, which is disabled by default.
-`apps`|a plugin that monitors system processes. It is very complex and heavy (heavier than the netdata daemon), so if you don't need to monitor the process tree, you can disable it.
-`charts.d`|BASH plugins (squid, nginx, mysql, etc). This is again a heavy plugin.
-`node.d`|node.js plugin, currently used for SNMP data collection and monitoring named (the name server).
-
-For most IoT devices, you can disable all plugins except `proc`. For `proc` there is another section that controls which functions of it you need. Check the next section.
-
----
-
-## 2. Disable internal plugins
-
-In this section you can select which modules of the `proc` plugin you need. All these are run in a single thread, one after another. Still, each one needs some RAM and consumes some CPU cycles.
-
-```
-[plugin:proc]
-    # /proc/net/dev = yes                          # network interfaces
-    # /proc/diskstats = yes                        # disks
-    # /proc/net/snmp = yes                         # generic IPv4
-    # /proc/net/snmp6 = yes                        # generic IPv6
-    # /proc/net/netstat = yes                      # TCP and UDP
-    # /proc/net/stat/conntrack = yes               # firewall
-    # /proc/net/ip_vs/stats = yes                  # IP load balancer
-    # /proc/net/stat/synproxy = yes                # Anti-DDoS
-    # /proc/stat = yes                             # CPU, context switches
-    # /proc/meminfo = yes                          # Memory
-    # /proc/vmstat = yes                           # Memory operations
-    # /proc/net/rpc/nfsd = yes                     # NFS Server
-    # /proc/sys/kernel/random/entropy_avail = yes  # Cryptography
-    # /proc/interrupts = yes                       # Interrupts
-    # /proc/softirqs = yes                         # SoftIRQs
-    # /proc/loadavg = yes                          # Load Average
-    # /sys/kernel/mm/ksm = yes                     # Memory deduper
-    # netdata server resources = yes               # netdata charts
-```
-
----
-
-## 3. Disable logs
-
-Normally, you will not need them. To disable them, set:
-
-```
-[global]
-    debug log = none
-    error log = none
-    access log = none
-```
-
----
-
-## 4. Set memory mode to RAM
-
-Setting the memory mode to `ram` will disable loading and saving the round robin database. This will not affect anything while netdata is running, but it might be required if you have very limited storage available.
-
-```
-[global]
-    memory mode = ram
-```
-
----
-
-## 5. CPU utilization
-
-If, after disabling the plugins you don't need, netdata still uses a lot of CPU without any clients accessing the dashboard, try lowering its data collection frequency. Going from "once per second" to "once every two seconds" will not make a significant difference to the user experience, but it will cut the CPU resources required **in half**.
-
-To set the update frequency, edit `/etc/netdata/netdata.conf` and set:
-
-```
-[global]
-    update every = 2
-```
-
-You may have to increase this to 5 or 10 if the CPU of the device is weak.
-
-Keep in mind this will also force dashboard chart refreshes to happen at the same rate. So increasing this number actually lowers the data collection frequency, but it also lowers the dashboard chart refresh frequency.
-
-This is a dashboard on a device with `[global].update every = 5` (this device is a media player and is now playing a movie):
-
-![pi1](https://cloud.githubusercontent.com/assets/2662304/15338489/ca84baaa-1c88-11e6-9ab2-118208e11ce1.gif)
-
----
-
-## 6. Lower memory requirements
-
-You can set the default size of the round robin database for all charts, using:
-
-```
-[global]
-    history = 600
-```
-
-The unit for `history` is `[global].update every` seconds. So if `[global].update every = 6` and `[global].history = 600`, you will have an hour of data ( 6 x 600 = 3,600 seconds ), which will store 600 points per dimension, one every 6 seconds.
-
-Check also [[Memory Requirements]] for directions on calculating the size of the round robin database.
-
----
-
-## 7. Disable gzip compression of responses
-
-Gzip compression of the web responses uses more CPU than the rest of netdata. You can lower the compression level or disable gzip compression completely. You can disable it like this:
-
-```
-[web]
-    enable gzip compression = no
-```
-
-To lower the compression level, do this:
-
-```
-[web]
-    enable gzip compression = yes
-    gzip compression level = 1
-```
-
----
-
-Finally, if no web server is installed on your device, you can use port tcp/80 for netdata:
-
-```
-[global]
-    port = 80
-```
-
----
-
-## 8. Monitoring RPi temperature
-
-The python version of the sensors plugin uses `lm-sensors`. Unfortunately the temperature readings of the RPi are not supported by `lm-sensors`.
-
-netdata also has a bash version of the sensors plugin that can read RPi temperatures. It is disabled by default to avoid conflicts with the python version.
-
-To enable it, edit `/etc/netdata/charts.d.conf` and uncomment this line:
-
-```sh
-sensors=force
-```
-
-Then restart netdata. You will get this:
-
-![image](https://user-images.githubusercontent.com/2662304/29658868-23aa65ae-88c5-11e7-9dad-c159600db5cc.png)
diff --git a/doc/netdata-security.md b/doc/netdata-security.md
deleted file mode 100644
index 79858656b..000000000
--- a/doc/netdata-security.md
+++ /dev/null
@@ -1,179 +0,0 @@
-# Netdata Security
-
-We have given special attention to all aspects of netdata, ensuring that everything throughout its operation is as secure as possible. netdata has been designed with security in mind.
-
-**Table of Contents**
-
-1. [your data are safe with netdata](#your-data-are-safe-with-netdata)
-2. [your systems are safe with netdata](#your-systems-are-safe-with-netdata)
-3. [netdata is read-only](#netdata-is-read-only)
-4. [netdata viewers authentication](#netdata-viewers-authentication)
- - [why netdata should be protected](#why-netdata-should-be-protected)
- - [protect netdata from the internet](#protect-netdata-from-the-internet)
- - [expose netdata only in a private LAN](#expose-netdata-only-in-a-private-lan)
- - [use an authenticating web server in proxy mode](#use-an-authenticating-web-server-in-proxy-mode)
- - [other methods](#other-methods)
-5. [registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server)
[registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server) - -## your data are safe with netdata - -netdata collects raw data from many sources. For each source, netdata uses a plugin that connects to the source (or reads the relevant files produced by the source), receives raw data and processes them to calculate the metrics shown on netdata dashboards. - -Even if netdata plugins connect to your database server, or read your application log file to collect raw data, the product of this data collection process is always a number of **chart metadata and metric values** (summarized data for dashboard visualization). All netdata plugins (internal to the netdata daemon, and external ones written in any computer language) convert the collected raw data into metrics, and only these metrics are stored in netdata databases, sent to upstream netdata servers, or archived to backend time-series databases. - -> The **raw data** collected by netdata do not leave the host they are collected on. **The only data netdata exposes are chart metadata and metric values.** - -This means that netdata can safely be used in environments that require the highest level of data isolation (like PCI Level 1). - -## your systems are safe with netdata - -We are very proud that **the netdata daemon runs as a normal system user, without any special privileges**. This is quite an achievement for a monitoring system that collects all kinds of system and application metrics. - -There are a few cases, however, where raw source data are only exposed to processes with escalated privileges. To support these cases, netdata attempts to minimize and completely isolate the code that runs with escalated privileges. - -So, netdata **plugins**, even those running with escalated capabilities or privileges, perform a **hard coded data collection job**. They do not accept commands from netdata. The communication is strictly **unidirectional**: from the plugin towards the netdata daemon. The original application data collected by each plugin do not leave the process that collected them, are not saved, and are not transferred to the netdata daemon. The communication from the plugins to the netdata daemon includes only chart metadata and processed metric values. - -netdata slaves streaming metrics to upstream netdata servers use exactly the same protocol local plugins use. The raw data collected by the plugins of slave netdata servers **never leave the host they are collected on**. The only data appearing on the wire are chart metadata and metric values. This communication is also **unidirectional**: slave netdata servers never accept commands from master netdata servers. - -## netdata is read-only - -netdata **dashboards are read-only**. Dashboard users can view and examine metrics collected by netdata, but cannot instruct netdata to do something other than present the already collected metrics. - -netdata dashboards do not expose sensitive information. Business data of any kind, the kernel version, O/S version, application versions, host IPs, etc. are not stored and are not exposed by netdata on its dashboards. - -## netdata viewers authentication - -netdata is a monitoring system. It should be protected, the same way you protect all your admin apps. We assume netdata will be installed privately, for your eyes only. - -### why netdata should be protected - -Viewers will be able to get some information about the system netdata is running on. 
This information is everything the dashboard provides. The dashboard includes a list of the services each system runs (the legends of the charts under the `Systemd Services` section), the applications running (the legends of the charts under the `Applications` section), the disks of the system and their names, the user accounts of the system that are running processes (the `Users` and `User Groups` section of the dashboard), the network interfaces and their names (not the IPs) and detailed information about the performance of the system and its applications. - -This information is not sensitive (meaning that it is not your business data), but **it is important for possible attackers**. It will give them clues on what to check, what to try, and, in the case of a DDoS against your applications, whether they are doing it right or not. - -Also, viewers could use netdata itself to stress your servers. Although the netdata daemon runs unprivileged, with the minimum process priority (scheduling priority `idle` - lower than nice 19) and with its OutOfMemory (OOM) score adjusted to 1000 (so that it will be the first to be killed by the kernel if the system starves for memory), some pressure can be applied on your systems if someone attempts a DDoS against netdata. - -### protect netdata from the internet - -netdata is a distributed application. Most likely you will have many installations of it. Since it is distributed and you are expected to jump from server to server, there is very little benefit in adding local authentication on each netdata server. - -Until we add a distributed authentication method to netdata, you have the following options: - -#### expose netdata only in a private LAN - -If your organisation has a private administration and management LAN, you can bind netdata to this network interface on all your servers. This is done in `netdata.conf` with these settings: - -``` -[web] - bind to = 10.1.1.1:19999 localhost:19999 -``` - -You can bind netdata to multiple IPs and ports. If you use hostnames, netdata will resolve them and use all the IPs (in the above example `localhost` usually resolves to both `127.0.0.1` and `::1`). - -**This is the best and the suggested way to protect netdata**. Your systems **should** have a private administration and management LAN, so that all management tasks are performed without any possibility of them being exposed on the internet. - -For cloud-based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers. - -For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your Mac). We use this to create a management and administration LAN for all netdata demo sites (spread all over the internet using multiple hosting providers). 
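- -Whichever interfaces you bind netdata to, you can verify what it is actually listening on. A quick check from a shell (assuming the `ss` utility from `iproute2` is available): - -```sh -# list the TCP sockets netdata is listening on (default port 19999) -ss -tlnp | grep 19999 -```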
- --- - -In netdata v1.9+ there is also access list support, like this: - -``` -[web] - bind to = * - allow connections from = localhost 10.* 192.168.* -``` - - -#### use an authenticating web server in proxy mode - -Use **one nginx** (or one apache) server to provide authentication in front of **all your netdata servers**. So, you will be accessing all your netdata with URLs like `http://nginx.host/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign in once for all your servers). Check [this wiki page for more information on configuring nginx for such a setup](Running-behind-nginx.md#netdata-via-nginx). A minimal sketch of such a configuration is given at the end of this section. - -To use this method, you should firewall protect all your netdata servers, so that only the nginx IP will be allowed to directly access netdata. To do this, run this on each of your servers (or use your firewall manager): - -```sh -NGINX_IP="1.2.3.4" -iptables -t filter -I INPUT -p tcp --dport 19999 \! -s ${NGINX_IP} -m conntrack --ctstate NEW -j DROP -``` -_command to allow direct access to netdata only from the nginx proxy_ - -The above will prevent anyone except your nginx server from accessing a netdata dashboard running on the host. - -For netdata v1.9+ you can also use `netdata.conf`: - -``` -[web] - allow connections from = localhost 1.2.3.4 -``` - -Of course you can add more IPs. - -For netdata prior to v1.9, if you want to allow multiple IPs, use this: - -```sh -# space separated list of IPs allowed to access netdata -NETDATA_ALLOWED="1.2.3.4 5.6.7.8 9.10.11.12" -NETDATA_PORT=19999 - -# create a new filtering chain, or empty an existing one named netdata -iptables -t filter -N netdata 2>/dev/null || iptables -t filter -F netdata -for x in ${NETDATA_ALLOWED} -do - # allow this IP - iptables -t filter -A netdata -s ${x} -j ACCEPT -done - -# drop all other IPs -iptables -t filter -A netdata -j DROP - -# delete the input chain hook (if it exists) -iptables -t filter -D INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata 2>/dev/null - -# add the input chain hook (again) -# to send all new netdata connections to our filtering chain -iptables -t filter -I INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata -``` -_script to allow access to netdata only from a number of hosts_ - -You can run the above any number of times. Each time it runs it refreshes the list of allowed hosts. - -#### other methods - -Of course, there are many more methods you could use to protect netdata: - -- bind netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. This way you can ssh to a netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote netdata dashboard. - -- If you always connect from a static IP, you can use the script given above to allow direct access to your netdata servers without authentication, from all your static IPs. - -- install all your netdata in **headless data collector** mode, forwarding all metrics in real-time to a master netdata server, which will be protected with authentication using an nginx server running locally on the master netdata server. This requires more resources (you will need a bigger master netdata server), but does not require any firewall changes, since the slave netdata servers will not be listening for incoming connections. 
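- -As mentioned in the proxy mode method above, here is a minimal sketch of an authenticating nginx configuration. It assumes a single netdata at `127.0.0.1:19999` and an htpasswd file you create yourself; `nginx.host` and all paths are placeholders to adapt, and the wiki page linked above has the complete configuration: - -``` -upstream netdata { - server 127.0.0.1:19999; - keepalive 64; -} - -server { - listen 80; - server_name nginx.host; - - location /netdata/ { - # users file created with: htpasswd -c /etc/nginx/netdata.htpasswd myuser - auth_basic "netdata"; - auth_basic_user_file /etc/nginx/netdata.htpasswd; - proxy_pass http://netdata/; - proxy_set_header Host $host; - } -} -```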
- -## registry or how to not send any information to a third party server - -The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](../registry/) ). Please be aware that if you use that public registry, you submit at least the following information to a third party server, which might violate your security policies: -- The public IP of the host where your browser runs -- The URL at which you open the web-ui in the browser (via the HTTP request referer) -- The hostnames of the netdata servers - -You can run your own registry, which is pretty simple to do: -- If you have just one netdata web-ui, turn on the registry and set the URL of that web-ui as `registry to announce`: -``` -[registry] -enabled = yes -registry to announce = URL_OF_THE_NETDATA_WEB-UI -``` -- If you run multiple netdata servers with web-ui, you need to define one as the registry. On that node, activate the registry and set its URL as `registry to announce`. On all other nodes, do not enable the registry but define the same URL. - -Restart netdata and use your browser's developer tools to check which registry is called. - -## netdata directories - -path|owner|permissions| netdata |comments| -:---|:----|:----------|:--------|:-------| -`/etc/netdata`|user `root`
group `netdata`|dirs `0755`
files `0640`|reads|**netdata config files**
may contain sensitive information, so group `netdata` is allowed to read them. -`/usr/libexec/netdata`|user `root`
group `root`|executable by anyone
dirs `0755`
files `0644` or `0755`|executes|**netdata plugins**
permissions depend on the file - not all of them should have the executable flag.
there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`. -`/usr/share/netdata`|user `root`
group `netdata`|readable by anyone
dirs `0755`
files `0644`|reads and sends over the network|**netdata web static files**
these files are sent over the network to anyone that has access to the netdata web server. netdata checks the ownership of these files (using settings in the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. netdata also refuses to serve URLs with `..` in their name. -`/var/cache/netdata`|user `netdata`
group `netdata`|dirs `0750`
files `0660`|reads, writes, creates, deletes|**netdata ephemeral database files**
netdata stores its ephemeral real-time database here. -`/var/lib/netdata`|user `netdata`
group `netdata`|dirs `0750`
files `0660`|reads, writes, creates, deletes|**netdata permanent database files**
netdata stores here the registry data, health alarm log db, etc. -`/var/log/netdata`|user `netdata`
group `root`|dirs `0755`
files `0644`|writes, creates|**netdata log files**
all the netdata applications log their errors or other informational messages to files in this directory. These files should be log rotated. diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index a852f3044..000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,104 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -# author : paulfantom - -# Cross-arch building is achieved by specifying ARCH as a build parameter with `--build-arg` option. -# It is automated in the `build.sh` script -ARG ARCH=amd64-v3.8 -FROM multiarch/alpine:${ARCH} as builder - -# Install prerequisites -RUN apk --no-cache add alpine-sdk \ - autoconf \ - automake \ - bash \ - build-base \ - curl \ - jq \ - libmnl-dev \ - libuuid \ - lm_sensors \ - netcat-openbsd \ - nodejs \ - pkgconfig \ - py-mysqldb \ - py-psycopg2 \ - py-yaml \ - python \ - util-linux-dev \ - zlib-dev - -# Copy source -COPY . /opt/netdata.git -WORKDIR /opt/netdata.git - -# Install from source -RUN chmod +x netdata-installer.sh && \ - sync && sleep 1 && \ - ./netdata-installer.sh --dont-wait --dont-start-it - -# Move installed files to one directory -RUN mkdir -p /app/usr/sbin/ \ - /app/usr/share \ - /app/usr/libexec \ - /app/usr/lib \ - /app/var/cache \ - /app/var/lib \ - /app/etc && \ - mv /usr/share/netdata /app/usr/share/ && \ - mv /usr/libexec/netdata /app/usr/libexec/ && \ - mv /usr/lib/netdata /app/usr/lib/ && \ - mv /var/cache/netdata /app/var/cache/ && \ - mv /var/lib/netdata /app/var/lib/ && \ - mv /etc/netdata /app/etc/ && \ - mv /usr/sbin/netdata /app/usr/sbin/ && \ - mv docker/run.sh /app/usr/sbin/ && \ - chmod +x /app/usr/sbin/run.sh - -##################################################################### -ARG ARCH -FROM multiarch/alpine:${ARCH} - -# Reinstall some prerequisites -RUN apk --no-cache add curl \ - fping \ - jq \ - libuuid \ - lm_sensors \ - netcat-openbsd \ - nodejs \ - py-mysqldb \ - py-psycopg2 \ - py-yaml \ - python - -# Copy files over -COPY --from=builder /app / - -# Configure system -ARG NETDATA_UID=201 -ARG NETDATA_GID=201 -RUN \ - # fping from alpine apk is in a different location. Moving it. 
- mv /usr/sbin/fping /usr/local/bin/fping && \ - chmod 4755 /usr/local/bin/fping && \ - mkdir -p /var/log/netdata && \ - # Add netdata user - addgroup -g ${NETDATA_GID} -S netdata && \ - adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_UID} -h /etc/netdata -G netdata netdata && \ - # Apply the permissions as described in - # https://github.com/netdata/netdata/tree/master/doc/netdata-security.md#netdata-directories - chown -R root:netdata /etc/netdata && \ - chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \ - chown -R root:netdata /usr/lib/netdata && \ - chown -R root:netdata /usr/libexec/netdata/plugins.d/apps.plugin /usr/libexec/netdata/plugins.d/cgroup-network && \ - chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \ - chmod 0750 /var/lib/netdata /var/cache/netdata && \ - # Link log files to stdout - ln -sf /dev/stdout /var/log/netdata/access.log && \ - ln -sf /dev/stdout /var/log/netdata/debug.log && \ - ln -sf /dev/stderr /var/log/netdata/error.log - -ENV NETDATA_PORT 19999 -EXPOSE $NETDATA_PORT - -ENTRYPOINT ["/usr/sbin/run.sh"] diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index d624855fb..000000000 --- a/docker/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# Install netdata with Docker - -> :warning: As of Sep 9th, 2018 we ship [new docker builds](https://github.com/netdata/netdata/pull/3995), running netdata in docker with an ENTRYPOINT directive, not a COMMAND directive. Please adapt your execution scripts accordingly. -> More information about ENTRYPOINT vs COMMAND is presented by goinbigdata [here](http://goinbigdata.com/docker-run-vs-cmd-vs-entrypoint/) and by docker docs [here](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). -> -> Also, the `latest` tag is now based on alpine, so **`alpine` is not updated any more** and `armv7hf` is now replaced with `armhf` (to comply with https://github.com/multiarch naming), so **`armv7hf` is not updated** either. - -## Limitations - -Running netdata in a container for monitoring the whole host can limit its capabilities. Some data is not accessible or not as detailed as when running netdata on the host. - -## Run netdata with docker command - -Quickly start netdata with the docker command line. -Netdata is then available at http://host:19999 - -This is good for an internal network or to quickly analyse a host. - -For a permanent installation on a public server, you should [[secure the netdata instance|netdata-security]]. See below for an example of how to install netdata with an SSL reverse proxy and basic authentication. - -```bash -docker run -d --name=netdata \ - -p 19999:19999 \ - -v /proc:/host/proc:ro \ - -v /sys:/host/sys:ro \ - -v /var/run/docker.sock:/var/run/docker.sock:ro \ - --cap-add SYS_PTRACE \ - --security-opt apparmor=unconfined \ - netdata/netdata -``` - -The above can be converted to a docker-compose file for ease of management: - -```yaml -version: '3' -services: - netdata: - image: netdata/netdata - hostname: example.com # set to fqdn of host - ports: - - 19999:19999 - cap_add: - - SYS_PTRACE - security_opt: - - apparmor:unconfined - volumes: - - /proc:/host/proc:ro - - /sys:/host/sys:ro - - /var/run/docker.sock:/var/run/docker.sock:ro -``` - -### Docker container names resolution - -If you want your container names to be resolved by netdata, it needs access to the docker group. 
To achieve that, just add the environment variable `PGID=999` to the netdata container, where `999` is the docker group id on your host. This number can be found by running: -```bash -grep docker /etc/group | cut -d ':' -f 3 -``` - -## Install Netdata using Docker Compose with an SSL/TLS enabled HTTP proxy - -You can use the following docker-compose.yml and Caddyfile to run netdata with docker. Replace the domains and the email address for Let's Encrypt before starting. - -### Prerequisites -* [Docker](https://docs.docker.com/install/#server) -* [Docker Compose](https://docs.docker.com/compose/install/) -* A domain configured in DNS, pointing to the host. - -### Caddyfile - -This file needs to be placed in `/opt` with the name `Caddyfile`. Here you customize your domain, and you need to provide your email address to obtain a Let's Encrypt certificate. Certificate renewal will happen automatically and will be executed internally by the caddy server. - -``` -netdata.example.org { - proxy / netdata:19999 - tls admin@example.org -} -``` - -### docker-compose.yml - -After creating the Caddyfile, run `docker-compose up -d` to get a fully functioning netdata setup behind an HTTP reverse proxy. - -```yaml -version: '3' -volumes: - caddy: - -services: - caddy: - image: abiosoft/caddy - ports: - - 80:80 - - 443:443 - volumes: - - /opt/Caddyfile:/etc/Caddyfile - - caddy:/root/.caddy - environment: - ACME_AGREE: 'true' - netdata: - restart: always - hostname: netdata.example.org - image: netdata/netdata - cap_add: - - SYS_PTRACE - security_opt: - - apparmor:unconfined - volumes: - - /proc:/host/proc:ro - - /sys:/host/sys:ro - - /var/run/docker.sock:/var/run/docker.sock:ro -``` - -### Restrict access with basic auth - -You can restrict access by following the [official caddy guide](https://caddyserver.com/docs/basicauth) and adding lines to your Caddyfile. diff --git a/docker/build.sh b/docker/build.sh deleted file mode 100755 index faaa2db79..000000000 --- a/docker/build.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-3.0-or-later -# Author : Pawel Krupa (paulfantom) -# Cross-arch docker build helper script -# Needs docker version >18.02 due to usage of manifests - -set -e - -if [ "$1" == "" ]; then - VERSION=$(git tag --points-at) -else - VERSION="$1" -fi -if [ "${VERSION}" == "" ]; then - VERSION="latest" -fi - -REPOSITORY="${REPOSITORY:-netdata}" - -echo "Building $VERSION of netdata container" - -declare -A ARCH_MAP -ARCH_MAP=( ["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64") - -docker run --rm --privileged multiarch/qemu-user-static:register --reset - -if [ -f Dockerfile ]; then - cd ../ || exit 1 -fi - -# Build images using multi-arch Dockerfile. -for ARCH in i386 armhf aarch64 amd64; do - docker build --build-arg ARCH="${ARCH}-v3.8" \ - --tag "${REPOSITORY}:${VERSION}-${ARCH}" \ - --file docker/Dockerfile ./ & -done -wait - -# Create temporary docker CLI config with experimental features enabled (manifests v2 need it) -mkdir -p /tmp/docker -echo '{"experimental":"enabled"}' > /tmp/docker/config.json - -# Log in to docker hub to allow further operations -if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PASSWORD+x} ]; then - echo "No docker hub username or password specified. 
Exiting without pushing images to registry" - exit 1 -fi -echo "$DOCKER_PASSWORD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin - -# Push images to registry -for ARCH in amd64 i386 armhf aarch64; do - docker --config /tmp/docker push "${REPOSITORY}:${VERSION}-${ARCH}" & -done -wait - -# Recreate docker manifest -docker --config /tmp/docker manifest create --amend \ - "${REPOSITORY}:${VERSION}" \ - "${REPOSITORY}:${VERSION}-i386" \ - "${REPOSITORY}:${VERSION}-armhf" \ - "${REPOSITORY}:${VERSION}-aarch64" \ - "${REPOSITORY}:${VERSION}-amd64" - -# Annotate manifest with CPU architecture information -for ARCH in i386 armhf aarch64 amd64; do - docker --config /tmp/docker manifest annotate "${REPOSITORY}:${VERSION}" "${REPOSITORY}:${VERSION}-${ARCH}" --os linux --arch "${ARCH_MAP[$ARCH]}" -done - -# Push manifest to docker hub -docker --config /tmp/docker manifest push -p "${REPOSITORY}:${VERSION}" - -# Show current manifest (debugging purposes only) -docker --config /tmp/docker manifest inspect "${REPOSITORY}:${VERSION}" - diff --git a/docker/run.sh b/docker/run.sh deleted file mode 100644 index b4cf52c7a..000000000 --- a/docker/run.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -#set -e - -if [ ${PGID+x} ]; then - echo "Adding user netdata to group with id ${PGID}" - addgroup -g "${PGID}" -S hostgroup 2>/dev/null - sed -i "s/${PGID}:$/${PGID}:netdata/g" /etc/group -fi - -exec /usr/sbin/netdata -u netdata -D -s /host -p "${NETDATA_PORT}" "$@" diff --git a/docs/Add-more-charts-to-netdata.md b/docs/Add-more-charts-to-netdata.md new file mode 100644 index 000000000..95efd70bd --- /dev/null +++ b/docs/Add-more-charts-to-netdata.md @@ -0,0 +1,438 @@ +# Add more charts to netdata + +netdata collects system metrics by itself. It has many [internal plugins](../collectors) for collecting most of the metrics presented by default when it starts, reading data from `/proc`, `/sys` and other Linux kernel sources. + +To collect non-system metrics, netdata supports a plugin architecture. 
The following are the currently available external plugins: + +- **[Web Servers](#web-servers)**, such as apache, nginx, nginx_plus, tomcat, litespeed +- **[Web Logs](#web-log-parsers)**, such as apache, nginx, lighttpd, gunicorn, squid access logs, apache cache.log +- **[Load Balancers](#load-balancers)**, like haproxy +- **[Message Brokers](#message-brokers)**, like rabbitmq, beanstalkd +- **[Database Servers](#database-servers)**, such as mysql, mariadb, postgres, couchdb, mongodb, rethinkdb +- **[Social Sharing Servers](#social-sharing-servers)**, like retroshare +- **[Proxy Servers](#proxy-servers)**, like squid +- **[HTTP accelerators](#http-accelerators)**, like varnish cache +- **[Search engines](#search-engines)**, like elasticsearch +- **[Name Servers](#name-servers)** (DNS), like bind, nsd, powerdns, dnsdist +- **[DHCP Servers](#dhcp-servers)**, like ISC DHCP +- **[UPS](#ups)**, such as APC UPS, NUT +- **[RAID](#raid)**, such as Linux software RAID (mdadm), MegaRAID +- **[Mail Servers](#mail-servers)**, like postfix, exim, dovecot +- **[File Servers](#file-servers)**, like samba, NFS, ftp, sftp, WebDAV +- **[Print Servers](#print-servers)**, like CUPS +- **[System](#system)**, for processes and other system metrics +- **[Sensors](#sensors)**, like temperature, fan speed, voltage, humidity, HDD/SSD S.M.A.R.T attributes +- **[Network](#network)**, such as SNMP devices, `fping`, access points, dns_query_time +- **[Time Servers](#time-servers)**, like chrony +- **[Security](#security)**, like FreeRADIUS, OpenVPN, Fail2ban +- **[Telephony Servers](#telephony-servers)**, like openSIPS +- **[Go applications](#go-applications)** +- **[Household appliances](#household-appliances)**, like SMA WebBox (solar power), Fronius Symo solar power, Stiebel Eltron heating +- **[Java Processes](#java-processes)**, via JMX or Spring Boot Actuator +- **[Provisioning Systems](#provisioning-systems)**, like Puppet +- **[Game Servers](#game-servers)**, like SpigotMC +- **[Distributed Computing Clients](#distributed-computing-clients)**, like BOINC +- **[Skeleton Plugins](#skeleton-plugins)**, for writing your own data collectors + +Check also [Third Party Plugins](Third-Party-Plugins.md) for a list of plugins distributed by third parties. + +## configuring plugins + +netdata comes with **internal** and **external** plugins: + +1. The **internal** ones are written in `C` and run as threads within the netdata daemon. +2. The **external** ones can be written in any computer language. The netdata daemon spawns these as processes (shown with `ps fax`) and reads their metrics using pipes (so the `stdout` of external plugins is connected to netdata for metrics collection and the `stderr` of external plugins is connected to `/var/log/netdata/error.log`). + +To make it easier to develop plugins, and to minimize the number of threads and processes running, netdata supports **plugin orchestrators**, each of them supporting one or more data collection **modules**. Currently we ship plugin orchestrators for 4 languages: `C`, `python`, `node.js` and `bash`, with 2 more under development (`go` and `java`). + +#### enabling and disabling plugins + +To control which plugins netdata runs, edit `netdata.conf` and check the `[plugins]` section. 
It looks like this: + +``` +[plugins] + # enable running new plugins = yes + # check for new plugins every = 60 + # proc = yes + # diskspace = yes + # cgroups = yes + # cups = yes + # tc = yes + # nfacct = yes + # idlejitter = yes + # freeipmi = yes + # node.d = yes + # python.d = yes + # fping = yes + # charts.d = yes + # apps = yes +``` + +The default for all plugins is the option `enable running new plugins`. So, setting this to `no` will disable all the plugins, except the ones specifically enabled. + +#### enabling and disabling modules + +Each of the **plugins** may support one or more data collection **modules**. To control which of its modules run, you have to consult the configuration of the **plugin** (see table below). + +#### modules configuration + +Most **modules** come with **auto-detection**, configured to work out-of-the-box on popular operating systems with the default settings. + +However, there are cases where auto-detection fails. Usually the reason is that the applications to be monitored do not allow netdata to connect. In most cases, allowing the user `netdata` from `localhost` to connect and collect metrics will automatically enable data collection for the application in question (it will require a netdata restart). + +You can verify that netdata **external plugins and their modules** are able to collect metrics, by following this procedure: + +```sh +# become user netdata +sudo su -s /bin/bash netdata + +# execute the plugin in debug mode, for a specific module. +# example for the python plugin, mysql module: +/usr/libexec/netdata/plugins.d/python.d.plugin 1 debug trace mysql +``` + +Similarly, you can use `charts.d.plugin` for BASH plugins and `node.d.plugin` for node.js plugins. +Other plugins (like `apps.plugin`, `freeipmi.plugin`, `fping.plugin`) use the native netdata plugin API and can be run directly. + +If you need to configure a netdata plugin or module, all user-supplied configuration is kept in `/etc/netdata`, while the stock versions of all files are in `/usr/lib/netdata/conf.d`. +To copy a stock file and edit it, run `/etc/netdata/edit-config`. Running this command without an argument will list the available stock files. + +Each file should provide plenty of examples and documentation about each module and plugin. + +This is a map of all the supported configuration files: + +#### map of configuration files + +plugin | language | plugin
configuration | modules
configuration | +---:|:---:|:---:|:---| +`apps.plugin`
(external plugin for monitoring the process tree on Linux and FreeBSD)|`C`|`netdata.conf` section `[plugin:apps]`|Custom configuration for the processes to be monitored at `apps_groups.conf` +`freebsd.plugin`
(internal plugin for monitoring FreeBSD system resources)|`C`|`netdata.conf` section `[plugin:freebsd]`|one section for each module `[plugin:freebsd:MODULE]`. Each module may provide additional sections in the form of `[plugin:freebsd:MODULE:SUBSECTION]`. +`cgroups.plugin`
(internal plugin for monitoring Linux containers, VMs and systemd services)|`C`|`netdata.conf` section `[plugin:cgroups]`|N/A +`charts.d.plugin`
(external plugin orchestrator for BASH modules)|`BASH`|`charts.d.conf`|a file for each module in `/etc/netdata/charts.d/` +`diskspace.plugin`
(internal plugin for collecting Linux mount points usage)|`C`|`netdata.conf` section `[plugin:diskspace]`|N/A +`fping.plugin`
(external plugin for collecting network latencies)|`C`|`fping.conf`|This plugin is a wrapper for the `fping` command. +`freeipmi.plugin`
(external plugin for collecting IPMI h/w sensors)|`C`|`netdata.conf` section `[plugin:freeipmi]`|N/A +`idlejitter.plugin`
(internal plugin for monitoring CPU jitter)|`C`|N/A|N/A +`macos.plugin`
(internal plugin for monitoring MacOS system resources)|`C`|`netdata.conf` section `[plugin:macos]`|one section for each module `[plugin:macos:MODULE]`. Each module may provide additional sections in the form of `[plugin:macos:MODULE:SUBSECTION]`. +`node.d.plugin`
(external plugin orchestrator of node.js modules)|`node.js`|`node.d.conf`|a file for each module in `/etc/netdata/node.d/`. +`proc.plugin`
(internal plugin for monitoring Linux system resources)|`C`|`netdata.conf` section `[plugin:proc]`|one section for each module `[plugin:proc:MODULE]`. Each module may provide additional sections in the form of `[plugin:proc:MODULE:SUBSECTION]`. +`python.d.plugin`
(external plugin orchestrator for running python modules)|`python`
v2 or v3
both are supported|`python.d.conf`|a file for each module in `/etc/netdata/python.d/`. +`statsd.plugin`
(internal plugin for collecting statsd metrics)|`C`|`netdata.conf` section `[statsd]`|Synthetic statsd charts can be configured with files in `/etc/netdata/statsd.d/`. +`tc.plugin`
(internal plugin for collecting Linux traffic QoS)|`C`|`netdata.conf` section `[plugin:tc]`|The plugin runs an external helper called `tc-qos-helper.sh` to interface with the `tc` command. This helper supports a few additional options using `tc-qos-helper.conf`. + + +## writing data collection modules + +You can add custom plugins following the [External Plugins Guide](../collectors/plugins.d/). + +--- + +## available data collection modules + +These are all the data collection plugins currently available. + +### Web Servers + +application|language|notes| +:---------:|:------:|:----| +apache|python
v2 or v3|Connects to multiple apache servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [apache.chart.py](../collectors/python.d.plugin/apache)
configuration file: [python.d/apache.conf](../collectors/python.d.plugin/apache)| +apache|BASH
Shell Script|Connects to an apache server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [apache.chart.sh](../collectors/charts.d.plugin/apache)
configuration file: [charts.d/apache.conf](../collectors/charts.d.plugin/apache)| +ipfs|python
v2 or v3|Connects to multiple ipfs servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ipfs.chart.py](../collectors/python.d.plugin/ipfs)
configuration file: [python.d/ipfs.conf](../collectors/python.d.plugin/ipfs)| +litespeed|python
v2 or v3|reads the litespeed `rtreport` files to collect metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [litespeed.chart.py](../collectors/python.d.plugin/litespeed)
configuration file: [python.d/litespeed.conf](../collectors/python.d.plugin/litespeed) +nginx|python
v2 or v3|Connects to multiple nginx servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nginx.chart.py](../collectors/python.d.plugin/nginx)
configuration file: [python.d/nginx.conf](../collectors/python.d.plugin/nginx)| +nginx_plus|python
v2 or v3|Connects to multiple nginx_plus servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nginx_plus.chart.py](../collectors/python.d.plugin/nginx_plus)
configuration file: [python.d/nginx_plus.conf](../collectors/python.d.plugin/nginx_plus)| +nginx|BASH
Shell Script|Connects to an nginx server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [nginx.chart.sh](../collectors/charts.d.plugin/nginx)
configuration file: [charts.d/nginx.conf](../collectors/charts.d.plugin/nginx)| +phpfpm|python
v2 or v3|Connects to multiple phpfpm servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [phpfpm.chart.py](../collectors/python.d.plugin/phpfpm)
configuration file: [python.d/phpfpm.conf](../collectors/python.d.plugin/phpfpm)| +phpfpm|BASH
Shell Script|Connects to one or more phpfpm servers (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [phpfpm.chart.sh](../collectors/charts.d.plugin/phpfpm)
configuration file: [charts.d/phpfpm.conf](../collectors/charts.d.plugin/phpfpm)| +tomcat|python
v2 or v3|Connects to multiple tomcat servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [tomcat.chart.py](../collectors/python.d.plugin/tomcat)
configuration file: [python.d/tomcat.conf](../collectors/python.d.plugin/tomcat)| +tomcat|BASH
Shell Script|Connects to a tomcat server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [tomcat.chart.sh](../collectors/charts.d.plugin/tomcat)
configuration file: [charts.d/tomcat.conf](../collectors/charts.d.plugin/tomcat)| + + +--- + +### Web Log Parsers + +application|language|notes| +:---------:|:------:|:----| +web_log|python
v2 or v3|powerful plugin, capable of incrementally parsing any number of web server log files
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [web_log.chart.py](../collectors/python.d.plugin/web_log)
configuration file: [python.d/web_log.conf](../collectors/python.d.plugin/web_log)| + + +--- + +### Database Servers + +application|language|notes| +:---------:|:------:|:----| +couchdb|python
v2 or v3|Connects to multiple couchdb servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [couchdb.chart.py](../collectors/python.d.plugin/couchdb)
configuration file: [python.d/couchdb.conf](../collectors/python.d.plugin/couchdb)| +memcached|python
v2 or v3|Connects to multiple memcached servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [memcached.chart.py](../collectors/python.d.plugin/memcached)
configuration file: [python.d/memcached.conf](../collectors/python.d.plugin/memcached)| +mongodb|python
v2 or v3|Connects to multiple `mongodb` servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-pymongo`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mongodb.chart.py](../collectors/python.d.plugin/mongodb)
configuration file: [python.d/mongodb.conf](../collectors/python.d.plugin/mongodb)| +mysql
mariadb|python
v2 or v3|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-mysqldb` (faster and preferred), or `python-pymysql`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mysql.chart.py](../collectors/python.d.plugin/mysql)
configuration file: [python.d/mysql.conf](../collectors/python.d.plugin/mysql)| +mysql
mariadb|BASH
Shell Script|Connects to multiple mysql or mariadb servers (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [mysql.chart.sh](../collectors/charts.d.plugin/mysql)
configuration file: [charts.d/mysql.conf](../collectors/charts.d.plugin/mysql)| +postgres|python
v2 or v3|Connects to multiple postgres servers (local or remote) to collect real-time performance metrics.
 
Requires package `python-psycopg2`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [postgres.chart.py](../collectors/python.d.plugin/postgres)
configuration file: [python.d/postgres.conf](../collectors/python.d.plugin/postgres)| +redis|python
v2 or v3|Connects to multiple redis servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [redis.chart.py](../collectors/python.d.plugin/redis)
configuration file: [python.d/redis.conf](../collectors/python.d.plugin/redis)| +rethinkdb|python
v2 or v3|Connects to multiple rethinkdb servers (local or remote) to collect real-time metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [rethinkdb.chart.py](../collectors/python.d.plugin/rethinkdbs)
configuration file: [python.d/rethinkdb.conf](../collectors/python.d.plugin/rethinkdbs)| + + +--- + +### Social Sharing Servers + +application|language|notes| +:---------:|:------:|:----| +retroshare|python
v2 or v3|Connects to multiple retroshare servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [retroshare.chart.py](../collectors/python.d.plugin/retroshare)
configuration file: [python.d/retroshare.conf](../collectors/python.d.plugin/retroshare)| + + +--- + +### Proxy Servers + +application|language|notes| +:---------:|:------:|:----| +squid|python
v2 or v3|Connects to multiple squid servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [squid.chart.py](../collectors/python.d.plugin/squid)
configuration file: [python.d/squid.conf](../collectors/python.d.plugin/squid)| +squid|BASH
Shell Script|Connects to a squid server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [squid.chart.sh](../collectors/charts.d.plugin/squid)
configuration file: [charts.d/squid.conf](../collectors/charts.d.plugin/squid)| + + +--- + +### HTTP Accelerators + +application|language|notes| +:---------:|:------:|:----| +varnish|python
v2 or v3|Uses the varnishstat command to provide varnish cache statistics (client metrics, cache performance, thread-related metrics, backend health, memory usage etc.).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [varnish.chart.py](../collectors/python.d.plugin/varnish)
configuration file: [python.d/varnish.conf](../collectors/python.d.plugin/varnish)| + + +--- + +### Search Engines + +application|language|notes| +:---------:|:------:|:----| +elasticsearch|python
v2 or v3|Monitors elasticsearch performance and health metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [elasticsearch.chart.py](../collectors/python.d.plugin/elasticsearch)
configuration file: [python.d/elasticsearch.conf](../collectors/python.d.plugin/elasticsearch)| + + +--- + +### Name Servers + +application|language|notes| +:---------:|:------:|:----| +named|node.js|Connects to multiple named (ISC-Bind) servers (local or remote) to collect real-time performance metrics. All versions of bind after 9.9.10 are supported.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [named.node.js](../collectors/node.d.plugin/named)
configuration file: [node.d/named.conf](../collectors/node.d.plugin/named)| +bind_rndc|python
v2 or v3|Parses the `named.stats` dump file to collect real-time performance metrics. All versions of bind after 9.6 are supported.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [bind_rndc.chart.py](../collectors/python.d.plugin/bind_rndc)
configuration file: [python.d/bind_rndc.conf](../collectors/python.d.plugin/bind_rndc)| +nsd|python
v2 or v3|Charts the queries and zones received by nsd.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [nsd.chart.py](../collectors/python.d.plugin/nsd)
configuration file: [python.d/nsd.conf](../collectors/python.d.plugin/nsd) +powerdns|python
v2 or v3|Monitors powerdns performance and health metrics
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [powerdns.chart.py](../collectors/python.d.plugin/powerdns)
configuration file: [python.d/powerdns.conf](../collectors/python.d.plugin/powerdns)| +dnsdist|python
v2 or v3|Monitors dnsdist performance and health metrics
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dnsdist.chart.py](../collectors/python.d.plugin/dnsdist)
configuration file: [python.d/dnsdist.conf](../collectors/python.d.plugin/dnsdist)| +unbound|python
v2 or v3|Monitors Unbound performance and resource usage metrics
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [unbound.chart.py](../collectors/python.d.plugin/unbound)
configuration file: [python.d/unbound.conf](../collectors/python.d.plugin/unbound)| + + +--- + +### DHCP Servers + +application|language|notes| +:---------:|:------:|:----| +isc dhcp|python
v2 or v3|Monitors the lease database to show all active leases.
 
Python v2 requires package `python-ipaddress`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [isc-dhcpd.chart.py](../collectors/python.d.plugin/isc_dhcpd)
configuration file: [python.d/isc-dhcpd.conf](../collectors/python.d.plugin/isc_dhcpd)| + + +--- + +### Load Balancers + +application|language|notes| +:---------:|:------:|:----| +haproxy|python
v2 or v3|Monitors frontend, backend and health metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [haproxy.chart.py](../collectors/python.d.plugin/haproxy)
configuration file: [python.d/haproxy.conf](../collectors/python.d.plugin/haproxy)| +traefik|python
v2 or v3|Connects to multiple traefik instances (local or remote) to collect API metrics (response status code, response time, average response time and server uptime).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [traefik.chart.py](../collectors/python.d.plugin/traefik)
configuration file: [python.d/traefik.conf](../collectors/python.d.plugin/traefik)| + +--- + +### Message Brokers + +application|language|notes| +:---------:|:------:|:----| +rabbitmq|python
v2 or v3|Monitors rabbitmq performance and health metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [rabbitmq.chart.py](../collectors/python.d.plugin/rabbitmq)
configuration file: [python.d/rabbitmq.conf](../collectors/python.d.plugin/rabbitmq)| +beanstalkd|python
v2 or v3|Provides server and tube level statistics.
 
Requires beanstalkc python package (`pip install beanstalkc` or install package `python-beanstalkc`, which also installs `python-yaml`).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [beanstalk.chart.py](../collectors/python.d.plugin/beanstalk)
configuration file: [python.d/beanstalk.conf](../collectors/python.d.plugin/beanstalk)| + + +--- + +### UPS + +application|language|notes| +:---------:|:------:|:----| +apcupsd|BASH
Shell Script|Connects to an apcupsd server to collect real-time statistics of an APC UPS.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [apcupsd.chart.sh](../collectors/charts.d.plugin/apcupsd)
configuration file: [charts.d/apcupsd.conf](../collectors/charts.d.plugin/apcupsd)| +nut|BASH
Shell Script|Connects to a nut server (upsd) to collect real-time UPS statistics.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [nut.chart.sh](../collectors/charts.d.plugin/nut)
configuration file: [charts.d/nut.conf](../collectors/charts.d.plugin/nut)| + + +--- + +### RAID + +application|language|notes| +:---------:|:------:|:----| +mdstat|python
v2 or v3|Parses `/proc/mdstat` to get md array health metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [mdstat.chart.py](../collectors/python.d.plugin/mdstat)
configuration file: [python.d/mdstat.conf](../collectors/python.d.plugin/mdstat)| +megacli|python
v2 or v3|Collects adapter, physical drives and battery stats.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [megacli.chart.py](../collectors/python.d.plugin/megacli)
configuration file: [python.d/megacli.conf](../collectors/python.d.plugin/megacli)| + +--- + +### Mail Servers + +application|language|notes| +:---------:|:------:|:----| +dovecot|python
v2 or v3|Connects to multiple dovecot servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dovecot.chart.py](../collectors/python.d.plugin/dovecot)
configuration file: [python.d/dovecot.conf](../collectors/python.d.plugin/dovecot)| +exim|python
v2 or v3|Charts the exim queue size.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [exim.chart.py](../collectors/python.d.plugin/exim)
configuration file: [python.d/exim.conf](../collectors/python.d.plugin/exim)| +exim|BASH
Shell Script|Charts the exim queue size.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [exim.chart.sh](../collectors/charts.d.plugin/exim)
configuration file: [charts.d/exim.conf](../collectors/charts.d.plugin/exim)| +postfix|python
v2 or v3|Charts the postfix queue size (supports multiple queues).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [postfix.chart.py](../collectors/python.d.plugin/postfix)
configuration file: [python.d/postfix.conf](../collectors/python.d.plugin/postfix)| +postfix|BASH
Shell Script|Charts the postfix queue size.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [postfix.chart.sh](../collectors/charts.d.plugin/postfix)
configuration file: [charts.d/postfix.conf](../collectors/charts.d.plugin/postfix)| + + +--- + +### File Servers + +application|language|notes| +:---------:|:------:|:----| +NFS Client|`C`|This is handled entirely by the netdata daemon.
 
Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfs]`. +NFS Server|`C`|This is handled entirely by the netdata daemon.
 
Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfsd]`. +samba|python
v2 or v3|Performance metrics of Samba SMB2 file sharing.
 
documentation page: [python.d.plugin module samba](../collectors/python.d.plugin/samba)
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [samba.chart.py](../collectors/python.d.plugin/samba)
configuration file: [python.d/samba.conf](../collectors/python.d.plugin/samba)| + +### Print Servers + +application|language|notes| +:---------:|:------:|:----| +CUPS|C|Charts metrics of printers, jobs and other cups destinations.
 
netdata plugin: cups.plugin + +--- + +### System + +application|language|notes| +:---------:|:------:|:----| +apps|C|`apps.plugin` collects resource usage statistics for all processes running in the system. It groups the entire process tree and reports dozens of metrics for CPU utilization, memory footprint, disk I/O, swap memory, network connections, open files and sockets, etc. It reports metrics for application groups, users and user groups.
 
[Documentation of `apps.plugin`](../collectors/apps.plugin/).
 
netdata plugin: [`apps_plugin.c`](../collectors/apps.plugin)
configuration file: [`apps_groups.conf`](../collectors/apps.plugin)| +cpu_apps|BASH
Shell Script|Collects the CPU utilization of select apps.

DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [cpu_apps.chart.sh](../collectors/charts.d.plugin/cpu_apps)
configuration file: [charts.d/cpu_apps.conf](../collectors/charts.d.plugin/cpu_apps)| +load_average|BASH
Shell Script|Collects the current system load average.

DEPRECATED IN FAVOR OF THE NETDATA INTERNAL ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [load_average.chart.sh](../collectors/charts.d.plugin/load_average)
configuration file: [charts.d/load_average.conf](../collectors/charts.d.plugin/load_average)| +mem_apps|BASH
Shell Script|Collects the memory footprint of select applications.

DEPRECATED IN FAVOR OF `apps.plugin`. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [mem_apps.chart.sh](../collectors/charts.d.plugin/mem_apps)
configuration file: [charts.d/mem_apps.conf](../collectors/charts.d.plugin/mem_apps)| + + +--- + +### Sensors + +application|language|notes| +:---------:|:------:|:----| +cpufreq|python
v2 or v3|Collects the current CPU frequency from `/sys/devices`.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [cpufreq.chart.py](../collectors/python.d.plugin/cpufreq)
configuration file: [python.d/cpufreq.conf](../collectors/python.d.plugin/cpufreq)| +cpufreq|BASH
Shell Script|Collects current CPU frequency from `/sys/devices`.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [cpufreq.chart.sh](../collectors/charts.d.plugin/cpufreq)
configuration file: [charts.d/cpufreq.conf](../collectors/charts.d.plugin/cpufreq)| +IPMI|C|Collects temperatures, voltages, currents, power, fans and `SEL` events from IPMI using `libipmimonitoring`.
Check [Monitoring IPMI](../collectors/freeipmi.plugin/) for more information
 
netdata plugin: [freeipmi.plugin](../collectors/freeipmi.plugin)
configuration file: none required - to enable it, compile/install netdata with `--enable-plugin-freeipmi`| +hddtemp|python
v2 or v3|Connects to multiple hddtemp servers (local or remote) to collect real-time performance metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [hddtemp.chart.py](../collectors/python.d.plugin/hddtemp)
configuration file: [python.d/hddtemp.conf](../collectors/python.d.plugin/hddtemp)| +hddtemp|BASH
Shell Script|Connects to a hddtemp server (local or remote) to collect real-time performance metrics.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [hddtemp.chart.sh](../collectors/charts.d.plugin/hddtemp)
configuration file: [charts.d/hddtemp.conf](../collectors/charts.d.plugin/hddtemp)| +sensors|BASH
Shell Script|Collects sensor values from files in `/sys`.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module to shell scripting plugins.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [sensors.chart.sh](../collectors/charts.d.plugin/sensors)
configuration file: [charts.d/sensors.conf](../collectors/charts.d.plugin/sensors)| +sensors|python
v2 or v3|Uses `lm-sensors` to collect sensor data.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [sensors.chart.py](../collectors/python.d.plugin/sensors)
configuration file: [python.d/sensors.conf](../collectors/python.d.plugin/sensors)| +smartd_log|python
v2 or v3|Collects the S.M.A.R.T attributes from `smartd` log files.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [smartd_log.chart.py](../collectors/python.d.plugin/smartd_log)
configuration file: [python.d/smartd_log.conf](../collectors/python.d.plugin/smartd_log)| +w1sensor|python
v2 or v3|Collects data from connected 1-Wire sensors.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [w1sensor.chart.py](../collectors/python.d.plugin/w1sensor)
configuration file: [python.d/w1sensor.conf](../collectors/python.d.plugin/w1sensor)| + + +--- + +### Network + +application|language|notes| +:---------:|:------:|:----| +ap|BASH
Shell Script|Uses the `iw` command to provide statistics of wireless clients connected to a wireless access point running on this host (works well with `hostapd`).
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [ap.chart.sh](../collectors/charts.d.plugin/ap)
configuration file: [charts.d/ap.conf](../collectors/charts.d.plugin/ap)| +fping|C|Charts network latency statistics for any number of nodes, using the `fping` command. A recent (probably unreleased) version of fping is required. The plugin supplied can install it in `/usr/local`.
 
netdata plugin: [fping.plugin](../collectors/fping.plugin) (this is a shell wrapper to start fping - once fping is started, netdata and fping communicate directly - it can also install the right version of fping)
configuration file: [fping.conf](../collectors/fping.plugin)| +snmp|node.js|Connects to multiple SNMP servers to collect real-time performance metrics.<br/>
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [snmp.node.js](../collectors/node.d.plugin/snmp)
configuration file: [node.d/snmp.conf](../collectors/node.d.plugin/snmp)| +dns_query_time|python
v2 or v3|Provides DNS query time statistics.
 
Requires package `dnspython` (`pip install dnspython` or install package `python-dnspython`).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [dns_query_time.chart.py](../collectors/python.d.plugin/dns_query_time)
configuration file: [python.d/dns_query_time.conf](../collectors/python.d.plugin/dns_query_time)| +http|python
v2 or v3|Monitors a generic web page for status code and returned content in HTML +port|python<br/>
v2 or v3|Checks a generic TCP port for availability and response time + + +--- + +### Time Servers + +application|language|notes| +:---------:|:------:|:----| +chrony|python<br/>
v2 or v3|Uses the `chronyc` command to provide chrony statistics (Frequency, Last offset, RMS offset, Residual freq, Root delay, Root dispersion, Skew, System time).<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [chrony.chart.py](../collectors/python.d.plugin/chrony)
configuration file: [python.d/chrony.conf](../collectors/python.d.plugin/chrony)| +ntpd|python
v2 or v3|Connects to multiple ntpd servers (local or remote) to provide statistics of system variables and optionally also peer variables (if enabled in the configuration).<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ntpd.chart.py](../collectors/python.d.plugin/ntpd)
configuration file: [python.d/ntpd.conf](../collectors/python.d.plugin/ntpd)| + + +--- + +### Security + +application|language|notes| +:---------:|:------:|:----| +freeradius|python
v2 or v3|Uses the `radclient` command to provide freeradius statistics (authentication, accounting, proxy-authentication, proxy-accounting).<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [freeradius.chart.py](../collectors/python.d.plugin/freeradius)
configuration file: [python.d/freeradius.conf](../collectors/python.d.plugin/freeradius)| +openvpn|python
v2 or v3|All data from openvpn-status.log in your dashboard!
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [ovpn_status_log.chart.py](../collectors/python.d.plugin/ovpn_status_log)
configuration file: [python.d/ovpn_status_log.conf](../collectors/python.d.plugin/ovpn_status_log)| +fail2ban|python
v2 or v3|Monitors the fail2ban log file to show all bans for all active jails.<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [fail2ban.chart.py](../collectors/python.d.plugin/fail2ban)
configuration file: [python.d/fail2ban.conf](../collectors/python.d.plugin/fail2ban)| + + +--- + +### Telephony Servers + +application|language|notes| +:---------:|:------:|:----| +opensips|BASH
Shell Script|Connects to an opensips server (local only) to collect real-time performance metrics.
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [opensips.chart.sh](../collectors/charts.d.plugin/opensips)
configuration file: [charts.d/opensips.conf](../collectors/charts.d.plugin/opensips)| + + +--- + +### Go applications + +application|language|notes| +:---------:|:------:|:----| +go_expvar|python
v2 or v3|Parses metrics exposed by applications written in the Go programming language using the [expvar package](https://golang.org/pkg/expvar/).
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [go_expvar.chart.py](../collectors/python.d.plugin/go_expvar)
configuration file: [python.d/go_expvar.conf](../collectors/python.d.plugin/go_expvar)
documentation: [Monitoring Go Applications](../collectors/python.d.plugin/go_expvar/)| + + +--- + +### Household Appliances + +application|language|notes| +:---------:|:------:|:----| +sma_webbox|node.js|Connects to multiple remote SMA webboxes to collect real-time performance metrics of the photovoltaic (solar) power generation.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [sma_webbox.node.js](../collectors/node.d.plugin/sma_webbox)
configuration file: [node.d/sma_webbox.conf](../collectors/node.d.plugin/sma_webbox)| +fronius|node.js|Connects to multiple remote Fronius Symo servers to collect real-time performance metrics of the photovoltaic (solar) power generation.
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [fronius.node.js](../collectors/node.d.plugin/fronius)
configuration file: [node.d/fronius.conf](../collectors/node.d.plugin/fronius)| +stiebeleltron|node.js|Collects the temperatures and other metrics from your Stiebel Eltron heating system using their Internet Service Gateway (ISG web).
 
netdata plugin: [node.d.plugin](../collectors/node.d.plugin#nodedplugin)
plugin module: [stiebeleltron.node.js](../collectors/node.d.plugin/stiebeleltron)
configuration file: [node.d/stiebeleltron.conf](../collectors/node.d.plugin/stiebeleltron)| + + +--- + +### Java Processes + +application|language|notes| +:---------:|:------:|:----| +Spring Boot Application|java|Monitors running Java [Spring Boot](https://spring.io/) applications that expose their metrics using the **Spring Boot Actuator** included in the Spring Boot library.<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [springboot](../collectors/python.d.plugin/springboot)
configuration file: [python.d/springboot.conf](../collectors/python.d.plugin/springboot) + + +--- + +### Provisioning Systems + +application|language|notes| +:---------:|:------:|:----| +puppet|python
v2 or v3|Connects to multiple Puppet Server and Puppet DB instances (local or remote) to collect real-time status metrics.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [puppet.chart.py](../collectors/python.d.plugin/puppet)
configuration file: [python.d/puppet.conf](../collectors/python.d.plugin/puppet)| + +--- + +### Game Servers + +application|language|notes| +:---------:|:------:|:----| +SpigotMC|Python
v2 or v3|Monitors Spigot Minecraft server ticks per second and number of online players using the Minecraft remote console.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [spigotmc.chart.py](../collectors/python.d.plugin/spigotmc)
configuration file: [python.d/spigotmc.conf](../collectors/python.d.plugin/spigotmc)| + +--- + +### Distributed Computing Clients + +application|language|notes| +:---------:|:------:|:----| +BOINC|Python
v2 or v3|Monitors task states for local and remote BOINC client software using the remote GUI RPC interface. Also provides alarms for a handful of error conditions. Requires manual configuration.<br/>
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [boinc.chart.py](../collectors/python.d.plugin/boinc)
configuration file: [python.d/boinc.conf](../collectors/python.d.plugin/boinc)| + +--- + +### Skeleton Plugins + +application|language|notes| +:---------:|:------:|:----| +example|BASH
Shell Script|Skeleton plugin in BASH.

DEPRECATED IN FAVOR OF THE PYTHON ONE. It is still supplied only as an example module for shell scripting plugins.<br/>
 
netdata plugin: [charts.d.plugin](../collectors/charts.d.plugin#chartsdplugin)
plugin module: [example.chart.sh](../collectors/charts.d.plugin/example)
configuration file: [charts.d/example.conf](../collectors/charts.d.plugin/example)| +example|python
v2 or v3|Skeleton plugin in Python.
 
netdata plugin: [python.d.plugin](../collectors/python.d.plugin)
plugin module: [example.chart.py](../collectors/python.d.plugin/example)
configuration file: [python.d/example.conf](../collectors/python.d.plugin/example)| + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FAdd-more-charts-to-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Charts.md b/docs/Charts.md new file mode 100644 index 000000000..64c36302f --- /dev/null +++ b/docs/Charts.md @@ -0,0 +1,27 @@ +# Charts, contexts, families + +Before configuring an alarm or writing a collector, it's important to understand how Netdata organizes collected metrics into charts. + +## Charts + +Each chart that you see on the netdata dashboard contains one or more dimensions, one for each collected or calculated metric. + +The chart name or chart id is what you see in parentheses at the top left corner of the chart you are interested in. For example, if you go to the system cpu chart: `http://your.netdata.ip:19999/#menu_system_submenu_cpu`, you will see at the top left of the chart the label "Total CPU utilization (system.cpu)". In this case, the chart name is `system.cpu`. + +## Dimensions + +Most charts depict more than one dimension. The dimensions of a chart are called "series" in some applications. You can see these dimensions on the right side of a chart, right under the date and time. For the system.cpu example we used, you will see the dimensions softirq, irq, user etc. Note that these are not always simple metrics (raw data). They could be calculated values (percentages, aggregates and more). + +## Families + +When you have several instances of a monitored hardware or software resource (e.g. network interfaces, mysql instances etc.), you need to be able to identify each one separately. Netdata uses "families" to identify such instances. For example, if I have the network interfaces `eth0` and `eth1`, `eth0` will be one family, and `eth1` will be another. + +The reasoning behind calling these instances "families" is that different charts for the same instance can be, and many times are, related (relatives, family, you get it). The family of a chart is usually the name of the netdata dashboard submenu that you see selected on the right navigation pane, when you are looking at a chart. For the example of the two network interfaces, you would see a submenu `eth0` and a submenu `eth1` under the "Network Interfaces" menu on the right navigation pane. + +## Contexts + +A context is a grouping of identical charts, one for each instance of the hardware or software monitored. For example, `health/health.d/net.conf` refers to four contexts: `net.drops`, `net.fifo`, `net.net`, `net.packets`. You can see the context of a chart if you hover over the date right above the dimensions of the chart. The line that appears shows you two things: the collector that produces the chart and the chart context. + +For example, let's take the `net.packets` context. You will see on the dashboard as many charts with context net.packets as you have network interfaces (families). These charts will be named `net_packets.[family]`. For the example of the two interfaces `eth0` and `eth1`, you will see charts named `net_packets.eth0` and `net_packets.eth1`. Both of these charts show the exact same dimensions, but for different instances of a network interface. <br/>
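A quick way to see chart ids, contexts and families on a live system is the `charts` endpoint of the dashboard's API. A minimal sketch, assuming netdata is listening on `localhost:19999` and `jq` is available (both are assumptions of this example, not requirements):

```sh
# list every chart with its context and family
curl -s "http://localhost:19999/api/v1/charts" |
  jq -r '.charts | to_entries[] | "\(.key) context=\(.value.context) family=\(.value.family)"'
```

For the two-interface example above, this would print `net_packets.eth0` and `net_packets.eth1`, both with context `net.packets`, with families `eth0` and `eth1` respectively.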
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FCharts&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md new file mode 100644 index 000000000..f6aad1398 --- /dev/null +++ b/docs/Demo-Sites.md @@ -0,0 +1,21 @@ +# Demo sites + +Live demo installations of netdata are available at **[https://my-netdata.io](https://my-netdata.io)**: + +Location | netdata demo URL | 60 mins reqs | VM Donated by +:-------:|:-----------------:|:----------:|:------------- +London (UK)|**[london.my-netdata.io](https://london.my-netdata.io)**
(this is the global netdata **registry** and has **named** and **mysql** charts)|[![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +Atlanta (USA)|**[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**
(with **named** and **mysql** charts)|[![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io)|[CDN77.com](https://www.cdn77.com/) +Israel|**[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)**|[![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io)|[OctopusCS.com](https://www.octopuscs.com) +Roubaix (France)|**[ventureer.my-netdata.io](https://ventureer.my-netdata.io)**|[![Requests Per Second](https://ventureer.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://ventureer.my-netdata.io)|[Ventureer.com](https://ventureer.com/) +Madrid (Spain)|**[stackscale.my-netdata.io](https://stackscale.my-netdata.io)**|[![Requests Per Second](https://stackscale.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://stackscale.my-netdata.io)|[StackScale Spain](https://www.stackscale.es/) +Bangalore (India)|**[bangalore.my-netdata.io](https://bangalore.my-netdata.io)**|[![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +Frankfurt (Germany)|**[frankfurt.my-netdata.io](https://frankfurt.my-netdata.io)**|[![Requests Per Second](https://frankfurt.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://frankfurt.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +New York (USA)|**[newyork.my-netdata.io](https://newyork.my-netdata.io)**|[![Requests Per Second](https://newyork.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://newyork.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +San Francisco (USA)|**[sanfrancisco.my-netdata.io](https://sanfrancisco.my-netdata.io)**|[![Requests Per Second](https://sanfrancisco.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://sanfrancisco.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +Singapore|**[singapore.my-netdata.io](https://singapore.my-netdata.io)**|[![Requests Per Second](https://singapore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://singapore.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) +Toronto (Canada)|**[toronto.my-netdata.io](https://toronto.my-netdata.io)**|[![Requests Per 
Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io)|[DigitalOcean.com](https://m.do.co/c/83dc9f941745) + +*Netdata dashboards are mobile and touch friendly.* + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDemo-Sites&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Donations-netdata-has-received.md b/docs/Donations-netdata-has-received.md new file mode 100644 index 000000000..3c737be8a --- /dev/null +++ b/docs/Donations-netdata-has-received.md @@ -0,0 +1,25 @@ +# Donations + +This is a list of the donations we have received for netdata (sorted alphabetically on their name): + +what donated|related links|who donated|description of the donation +----:|:-----:|:---:|:----------- +Packages Distribution|-|**[PackageCloud.io](https://packagecloud.io/)**|**PackageCloud.io** donated to a free open-source subscription to their awesome Package Distribution services. +Cross Browser Testing|-|**[BrowserStack.com](https://www.browserstack.com/)**|**BrowserStack.com** donated a free subscription to their awesome Browser Testing services (all three of them: Live, Screenshots, Responsive). +Cloud VM|[cdn77.my-netdata.io](http://cdn77.my-netdata.io)|**[CDN77.com](https://www.cdn77.com/)**|**CDN77.com** donated a VM with 2 CPU cores, 4GB RAM and 20GB HD, on their excellent CDN network. +Localization Management|[netdata localization project](https://crowdin.com/project/netdata) (check issue [#279](https://github.com/netdata/netdata/issues/279))|**[Crowdin.com](https://crowdin.com/)**|**Crowdin.com** donated an open source license to their Localization Management Platform. +Cloud VMs|[london.my-netdata.io](https://london.my-netdata.io) (Several VMs)|**[DigitalOcean.com](https://www.digitalocean.com/)**|**DigitalOcean.com** donated 1000 USD to be used in their excellent Cloud Computing services. Many thanks to [Justin Paine](https://github.com/xxdesmus) for making this happen. +Development IDE|-|**[JetBrains.com](https://www.jetbrains.com/)**|**JetBrains.com** donated an open source license for 4 developers for 1 year, to their excellent IDEs. +Cloud VM|[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)|**[OctopusCS.com](https://octopuscs.com/)**|**OctopusCS.com** donated a VM with 4 CPU cores, 16GB RAM and 50GB HD in their excellent Cloud Computing services. +Cloud VM|[ventureer.my-netdata.io](https://ventureer.my-netdata.io)|**[Ventureer.com](https://ventureer.com/)**|**Ventureer.com** donated a VM with 4 CPU cores, 8GB RAM and 50GB HD in their excellent Cloud Computing services. +Cloud VM|[stackscale.my-netdata.io](https://stackscale.my-netdata.io)|**[stackscale.com](https://www.stackscale.com/)**|**StackScale.com** donated a VM with 4 CPU cores, 16GB RAM and 100GB HD in their excellent Cloud Computing services. + +Thank you! + +--- + +**Do you want to donate?** We are thirsty for on-line services that can help us make netdata better. We also try to build a network of demo sites (VMs) that can help us show the full potential of netdata. + +Please contact me at costa@tsaousis.gr. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FDonations-netdata-has-received&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md new file mode 100644 index 000000000..cc58634f1 --- /dev/null +++ b/docs/GettingStarted.md @@ -0,0 +1,182 @@ +# Getting Started + +These are your first steps **after** you have installed netdata. If you haven't installed it already, please check the [installation page](../packaging/installer). + +## Accessing the dashboard + +To access the netdata dashboard, navigate with your browser to: + +``` +http://your.server.ip:19999/ +``` + +
<details markdown="1"><summary>Click here, if it does not work.</summary> + +**Verify Netdata is running.** + +Open an ssh session to the server and execute `sudo ps -e | grep netdata`. It should respond with the PID of the netdata daemon. If it prints nothing, Netdata is not running. Check the [installation page](../packaging/installer) to install it. + +**Verify Netdata responds to HTTP requests.** + +Using the same ssh session, execute `curl -Ss http://localhost:19999`. It should dump the `index.html` page of the dashboard on your screen. If it does not, check the [installation page](../packaging/installer) to install it. + +**Verify Netdata receives the HTTP requests.** + +On the same ssh session, execute `tail -f /var/log/netdata/access.log` (if you installed the static 64bit package, use: `tail -f /opt/netdata/var/log/netdata/access.log`). This command will print on your screen all HTTP requests Netdata receives. + +Next, try to access the dashboard using your web browser, using the URL posted above. If nothing is printed on your terminal, the HTTP request is not routed to your Netdata. + +If you are not sure about your server IP, run this for a hint: `ip route get 8.8.8.8 | grep -oP " src [0-9\.]+ "`. It should print the IP of your server. + +If Netdata still does not receive the requests, something is blocking them, possibly a firewall. Please check your network. + +<br/>
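Put together, the checks above can be run from a single ssh session. A sketch using the default paths (use the `/opt/netdata/...` variants if you installed the static 64bit package):

```sh
# 1. verify the netdata daemon is running
sudo ps -e | grep netdata

# 2. verify it responds to HTTP requests locally
curl -Ss http://localhost:19999 | head

# 3. watch incoming requests while you retry from your browser
tail -f /var/log/netdata/access.log
```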
 
+ +When you install multiple Netdata servers, all your servers will appear in the `my-netdata` menu at the top left of the dashboard. For this to work, you have to manually access the dashboard of each of your netdata servers just once. + +The `my-netdata` menu is more than just browser bookmarks. When switching Netdata servers from that menu, any settings of the current view are propagated to the other netdata server: + +- the current charts panning (drag the charts left or right), +- the current charts zooming (`SHIFT` + mouse wheel over a chart), +- the highlighted time-frame (`ALT` + select an area on a chart), +- the scrolling position of the dashboard, +- the theme you use, +- etc. + +are all sent over to the other netdata server, to allow you to troubleshoot cross-server performance issues easily. + +## Starting and stopping Netdata + +The Netdata installer integrates Netdata into your init / systemd environment. + +To start/stop Netdata, depending on your environment, you should use: + +- `systemctl start netdata` and `systemctl stop netdata` +- `service netdata start` and `service netdata stop` +- `/etc/init.d/netdata start` and `/etc/init.d/netdata stop` + +Once netdata is installed, the installer configures it to start at boot and stop at shutdown. + +For more information about using these commands, consult your system documentation. + +## Sizing Netdata + +The default installation of netdata is configured for a small round-robin database: just 1 hour of data. Depending on the memory your system has and the amount you can dedicate to Netdata, you should adapt this. On production systems with limited RAM, we suggest setting this to 3-4 hours. For best results you should set this to 24 or 48 hours. + +For every hour of data, Netdata needs about 25MB of RAM. If you can dedicate about 100MB of RAM to netdata, you should set its database size to 4 hours. + +To do this, edit `/etc/netdata/netdata.conf` (or `/opt/netdata/etc/netdata/netdata.conf`) and set: + +``` +[global] + history = SECONDS +``` + +Make sure the `history` line is not commented (comment lines start with `#`). + +1 hour is 3600 seconds, so the number you need to set is the result of `HOURS * 3600`. + +!!! danger + Be careful when you set this on production systems. If you set it too high, your system may run out of memory. By default, netdata is configured to be killed first when the system is starved for memory, but it is better to be careful and avoid issues. + +For more information about Netdata memory requirements, [check this page](../database). + +If your kernel supports KSM (most do), you can [enable KSM to halve netdata's memory requirement](../database#ksm). + +## Service discovery and auto-detection + +Netdata supports auto-detection of data collection sources. It auto-detects almost everything: database servers, web servers, DNS servers, etc. + +This auto-detection process happens **only once**, when netdata starts. To have Netdata re-discover data sources, you need to restart it. There are a few exceptions to this: + +- containers and VMs are auto-detected forever (when Netdata is running on the host). +- many data sources are collected but are silenced by default, until there is useful information to collect (for example, network interface dropped packets will appear only after a packet has been dropped). +- services that are not optimal to collect on all systems are disabled by default. <br/>
+- services for which users reported issues when monitored are also disabled by default (for example, `chrony` is disabled by default, because CentOS ships a version of it that uses 100% CPU when queried for statistics). + +Once a data collection source is detected, netdata will never stop trying to collect data from it, until Netdata is restarted. So, if you stop your web server, netdata will pick it up automatically when it is started again. + +Since Netdata is installed on all your systems (even inside containers), auto-detection is limited to `localhost`. This significantly simplifies the security model of a Netdata monitored infrastructure, since most applications allow `localhost` access by default. + +A few well known data collection sources that commonly need to be configured are: + +- [systemd services utilization](../collectors/cgroups.plugin/#monitoring-systemd-services) are not exposed by default on most systems, so `systemd` has to be configured to expose those metrics. + +## Configuration quick start + +In netdata we have: + +- **internal** data collection plugins (running inside the netdata daemon) +- **external** data collection plugins (independent processes, sending data to netdata over pipes) +- modular plugin **orchestrators** (external plugins that have multiple data collection modules) + +You can enable and disable plugins (internal and external) via `netdata.conf` in the `[plugins]` section. + +All plugins have dedicated sections in `netdata.conf`, like `[plugin:XXX]`, for overwriting their default data collection frequency and providing additional command line options to them. + +All external plugins have their own `.conf` file. + +All modular plugin orchestrators have a directory in `/etc/netdata` with a `.conf` file for each of their modules. + +It is complex. So, let's see the whole configuration tree for the `nginx` module of `python.d.plugin`: + +In `netdata.conf` at the `[plugins]` section, `python.d.plugin` can be enabled or disabled: + +``` +[plugins] + python.d = yes +``` + +In `netdata.conf` at the `[plugin:python.d]` section, we can provide additional command line options for `python.d.plugin` and overwrite its data collection frequency: + +``` +[plugin:python.d] + update every = 1 + command options = +``` + +`python.d.plugin` has its own configuration file for enabling and disabling its modules (here you can disable `nginx` for example): + +```bash +sudo /etc/netdata/edit-config python.d.conf +``` + +Then, `nginx` has its own configuration file for configuring its data collection jobs (most modules can collect data from multiple sources, so the `nginx` module can collect metrics from multiple, local or remote, `nginx` servers): + +```bash +sudo /etc/netdata/edit-config python.d/nginx.conf +``` + +## Health monitoring and alarms + +Netdata ships hundreds of health monitoring alarms for detecting anomalies. These are optimized for production servers. + +Many users install netdata on workstations and are frustrated by the default alarms shipped with netdata. In these cases, we suggest disabling health monitoring. + +To disable it, edit `/etc/netdata/netdata.conf` (or `/opt/netdata/etc/netdata/netdata.conf` if you installed the static 64bit package) and set: + +``` +[health] + enabled = no +``` + +The above will disable health monitoring entirely. <br/>
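To verify the result, you can ask the dashboard's API for the configured alarms. A minimal sketch, assuming netdata listens on the default port (with health monitoring disabled, it should report none):

```sh
# list the alarms netdata knows about (empty once health is disabled)
curl -s "http://localhost:19999/api/v1/alarms?all"
```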
+ +If you want to keep health monitoring enabled for the dashboard, but you want to disable email notifications, run this: + +```bash +sudo /etc/netdata/edit-config health_alarm_notify.conf +``` + +and set `SEND_EMAIL="NO"`. + +(For static 64bit installations use `sudo /opt/netdata/etc/netdata/edit-config health_alarm_notify.conf`). + +## What is next? + +- Check [Data Collection](../collectors) for configuring data collection plugins. +- Check [Health Monitoring](../health) for configuring your own alarms, or setting up alarm notifications. +- Check [Streaming](../streaming) for centralizing netdata metrics. +- Check [Backends](../backends) for long term archiving of netdata metrics to time-series databases. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FGettingStarted&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Netdata-Security-and-Disclosure-Information.md b/docs/Netdata-Security-and-Disclosure-Information.md new file mode 100644 index 000000000..8e8a66afc --- /dev/null +++ b/docs/Netdata-Security-and-Disclosure-Information.md @@ -0,0 +1,39 @@ +# Netdata Security and Disclosure Information + +This page describes netdata security and disclosure information. + +## Security Announcements + +Every time a security issue is fixed in netdata, we immediately release a new version of it. So, to get notified of all security incidents, please subscribe to our releases on GitHub. + +## Report a Vulnerability + +We’re extremely grateful to security researchers and users who report vulnerabilities to the Netdata Open Source Community. All reports are thoroughly investigated by a set of community volunteers. + +To make a report, please email the private [security@netdata.cloud](mailto:security@netdata.cloud) list with the security details and the details expected for [all netdata bug reports](../.github/ISSUE_TEMPLATE/bug_report.md). + +## When Should I Report a Vulnerability? + +- You think you discovered a potential security vulnerability in Netdata +- You are unsure how a vulnerability affects Netdata +- You think you discovered a vulnerability in another project that Netdata depends on (e.g. python, node, etc) + +### When Should I NOT Report a Vulnerability? + +- You need help tuning Netdata for security +- You need help applying security related updates +- Your issue is not security related + +## Security Vulnerability Response + +Each report is acknowledged and analyzed by Netdata Team members within 3 working days. This will set off a Security Release Process. + +Any vulnerability information shared with the Netdata Team stays within the Netdata project and will not be disseminated to other projects unless it is necessary to get the issue fixed. + +As the security issue moves from triage, to identified fix, to release planning, we will keep the reporter updated. + +## Public Disclosure Timing + +A public disclosure date is negotiated by the Netdata team and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. As a basic default, we expect report date to disclosure date to be on the order of 7 days. <br/>
The Netdata team holds the final say when setting a disclosure date. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FNetdata-Security-and-Disclosure-Information&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Performance.md b/docs/Performance.md new file mode 100644 index 000000000..b08549f11 --- /dev/null +++ b/docs/Performance.md @@ -0,0 +1,224 @@ +# Performance + +netdata performance is affected by: + +**Data collection** +- the number of charts for which data are collected +- the number of plugins running +- the technology of the plugins (i.e. BASH plugins are slower than binary plugins) +- the frequency of data collection + +You can control all the above. + +**Web clients accessing the data** +- the duration of the charts in the dashboard +- the number of chart refreshes requested +- the compression level of the web responses + +--- + +## Netdata Daemon + +For most server systems, with a few hundred charts and a few thousand dimensions, the netdata daemon, without any web clients accessing it, should not use more than 1% of a single core. + +To prove netdata scalability, check issue [#1323](https://github.com/netdata/netdata/issues/1323#issuecomment-265501668) where netdata collects 95,000 metrics per second, with 12% CPU utilization of a single core! + +In embedded systems, if the netdata daemon is using a lot of CPU without any web clients accessing it, you should lower the data collection frequency. To set the data collection frequency, edit `/etc/netdata/netdata.conf` and set `update_every` to a higher number (this is the frequency in seconds data are collected for all charts: higher number of seconds = lower frequency, the default is 1 for per second data collection). You can also set this frequency per module or chart. Check the [daemon configuration](../daemon/config) for plugins and charts. For specific modules, the configuration needs to be changed in: +- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin) +- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin) +- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin) + +## Plugins + +If a plugin is using a lot of CPU, you should lower its update frequency, or if you wrote it, re-factor it to be more CPU efficient. Check [External Plugins](../collectors/plugins.d/) for more details on writing plugins. + +## CPU consumption when web clients are accessing dashboards + +Netdata is very efficient when servicing web clients. On most server platforms, netdata should be able to serve **1800 web client requests per second per core** for auto-refreshing charts. + +Normally, each user connected will request less than 10 chart refreshes per second (the page may have hundreds of charts, but only the visible ones are refreshed). So you can expect 180 users per CPU core accessing dashboards before having any delays. + +Netdata runs with the lowest possible process priority, so even if 1000 users are accessing dashboards, it should not influence your applications. CPU utilization will reach 100%, but your applications should get all the CPU they need. + +To lower the CPU utilization of netdata when clients are accessing the dashboard, set `web compression level = 1`, or disable web compression completely by setting `enable web responses gzip compression = no`. Both settings are in the `[web]` section. <br/>
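As a sketch, the relevant fragment of `/etc/netdata/netdata.conf` would look like this (pick one of the two approaches):

```
[web]
    # keep compression, but make it cheaper:
    web compression level = 1

    # or disable compression entirely:
    #enable web responses gzip compression = no
```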
+ + +## Monitoring a heavily loaded system + +Netdata, while running, does not depend on disk I/O (apart from its log files; `access.log` is written with buffering enabled, and logging can be disabled). Some plugins that need disk may stop and show gaps during heavy system load, but the netdata daemon itself should be able to work and collect values from `/proc` and `/sys` and serve web clients accessing it. + +Keep in mind that netdata saves its database when it exits and loads it back when restarted. While it is running though, its DB is only stored in RAM and no I/O takes place for it. + +## Netdata process priority + +By default, netdata runs with the `idle` process scheduler, which assigns CPU resources to netdata, only when the system has such resources to spare. + +The following `netdata.conf` settings control this: + +``` +[global] + process scheduling policy = idle + process scheduling priority = 0 + process nice level = 19 +``` + +The policies supported by netdata are `idle` (the netdata default), `other` (also as `nice`), `batch`, `rr`, `fifo`. netdata also recognizes `keep` and `none` to keep the current settings without changing them. + +For `other`, `nice` and `batch`, the setting `process nice level = 19` is activated to configure the nice level of netdata. Nice gets values -20 (highest) to 19 (lowest). + +For `rr` and `fifo`, the setting `process scheduling priority = 0` is activated to configure the priority of the relative scheduling policy. Priority gets values 1 (lowest) to 99 (highest). + +For the details of each scheduler, see `man sched_setscheduler` and `man sched`. + +When netdata is running under systemd, it can only lower its priority (the default is `other` with `nice level = 0`). If you want to make netdata get more CPU than that, you will need to set in `netdata.conf`: + +``` +[global] + process scheduling policy = keep +``` + +and edit `/etc/systemd/system/netdata.service` and add: + +``` +CPUSchedulingPolicy=other | batch | idle | fifo | rr +CPUSchedulingPriority=99 +Nice=-10 +``` + +## Running netdata in embedded devices + +Embedded devices usually have very limited CPU resources available, and in most cases, just a single core. + +> Keep in mind that netdata on RPi 2 and 3 does not require any tuning. The default settings will be good. The following tunables apply only when running netdata on RPi 1 or other very weak IoT devices. + +We suggest doing the following: + +### 1. Disable External plugins + +External plugins can consume more system resources than the netdata server. Disable the ones you don't need. If you need them, increase their `update every` value (again in `/etc/netdata/netdata.conf`), so that they do not run that frequently. + +Edit `/etc/netdata/netdata.conf`, find the `[plugins]` section: + +``` +[plugins] + proc = yes + + tc = no + idlejitter = no + cgroups = no + checks = no + apps = no + charts.d = no + node.d = no + python.d = no + + plugins directory = /usr/libexec/netdata/plugins.d + enable running new plugins = no + check for new plugins every = 60 +``` + +In detail: + +plugin|description +:---:|:--------- +`proc`|the internal plugin used to monitor the system. Normally, you don't want to disable this. You can disable individual functions of it at the next section. +`tc`|monitoring network interfaces QoS (tc classes) +`idlejitter`|internal plugin (written in C) that attempts to show if the system is starved for CPU. Disabling it will eliminate a thread. +`cgroups`|monitoring linux containers. Most probably you are not going to need it. <br/>
This will also eliminate another thread. +`checks`|a debugging plugin, which is disabled by default. +`apps`|a plugin that monitors system processes. It is very complex and heavy (consumes twice the CPU resources of the netdata daemon), so if you don't need to monitor the process tree, you can disable it. +`charts.d`|BASH plugins (squid, nginx, mysql, etc). This is a heavy plugin that consumes twice the CPU resources of the netdata daemon. +`node.d`|node.js plugin, currently used for SNMP data collection and monitoring named (the name server). +`python.d`|has many modules and can use over 20MB of memory. + +For most IoT devices, you can disable all plugins except `proc`. For `proc` there is another section that controls which functions of it you need. Check the next section. + +--- + +### 2. Disable internal plugins + +In this section you can select which modules of the `proc` plugin you need. All these are run in a single thread, one after another. Still, each one needs some RAM and consumes some CPU cycles. With all the modules enabled, the `proc` plugin adds ~9 MiB on top of the 5 MiB required by the netdata daemon. + +``` +[plugin:proc] + # /proc/net/dev = yes # network interfaces + # /proc/diskstats = yes # disks +... +``` + +Refer to the [proc.plugins documentation](../collectors/proc.plugin/) for the list and description of all the proc plugin modules. + +### 3. Lower internal plugin update frequency + +If netdata is still using a lot of CPU, lower its update frequency. Going from per second updates, to once every 2 seconds updates, will cut the CPU resources of all netdata programs **in half**, and you will still have very frequent updates. + +If the CPU of the embedded device is too weak, try setting an even lower update frequency. Experiment with `update every = 5` or `update every = 10` (higher number = lower frequency) in `netdata.conf`, until you get acceptable results. + +Keep in mind this will also force dashboard chart refreshes to happen at the same rate. So increasing this number actually lowers data collection frequency but also lowers dashboard chart refresh frequency. + +This is a dashboard on a device with `[global].update every = 5` (this device is a media player and is now playing a movie): + +![pi1](https://cloud.githubusercontent.com/assets/2662304/15338489/ca84baaa-1c88-11e6-9ab2-118208e11ce1.gif) + +### 4. Disable logs + +Normally, you will not need them. To disable them, set: + +``` +[global] + debug log = none + error log = none + access log = none +``` + +### 5. Set memory mode to RAM + +Setting the memory mode to `ram` will disable loading and saving the round robin database. This will not affect anything while running netdata, but it might be required if you have very limited storage available. + +``` +[global] + memory mode = ram +``` + +### 6. Use the single threaded web server + +Normally, netdata spawns a thread for each web client. This allows netdata to utilize all the available cores for servicing chart refreshes. You can however disable this feature and serve all charts one after another, using a single thread / core. This might lower the CPU pressure on the embedded device. To enable the single threaded web server, edit `/etc/netdata/netdata.conf` and set `mode = single-threaded` in the `[web]` section. + +### 7. Lower memory requirements + +You can set the default size of the round robin database for all charts, using: + +``` +[global] + history = 600 +``` + +The units for `history` are `[global].update every` seconds. <br/>
So if `[global].update every = 6` and `[global].history = 600`, you will have an hour of data ( 6 x 600 = 3,600 seconds ), which will store 600 points per dimension, one every 6 seconds. + +Check also [Database](../database) for directions on calculating the size of the round robin database. + + +### 8. Disable gzip compression of responses + +Gzip compression of the web responses uses more CPU than the rest of netdata. You can lower the compression level or disable gzip compression completely. You can disable it, like this: + +``` +[web] + enable gzip compression = no +``` + +To lower the compression level, do this: + +``` +[web] + enable gzip compression = yes + gzip compression level = 1 +``` + +Finally, if no web server is installed on your device, you can use port tcp/80 for netdata: + +``` +[web] + port = 80 +``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FPerformance&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md new file mode 100644 index 000000000..7838665cd --- /dev/null +++ b/docs/Running-behind-apache.md @@ -0,0 +1,270 @@ +# Netdata via apache's mod_proxy + +Below you can find instructions for configuring an apache server to: + +1. proxy a single netdata via an HTTP and HTTPS virtual host +2. dynamically proxy any number of netdata +3. add user authentication +4. adjust netdata settings to get optimal results + + +## Requirements + +Make sure your apache has `mod_proxy` and `mod_proxy_http` installed. + +On debian/ubuntu systems, install them with this: + +```sh +sudo apt-get install libapache2-mod-proxy-html +``` + +Also make sure they are enabled: + +``` +sudo a2enmod proxy +sudo a2enmod proxy_http +``` + +Ensure your rewrite module is enabled: + +``` +sudo a2enmod rewrite +``` + +--- + +## netdata on an existing virtual host + +On any **existing** and already **working** apache virtual host, you can redirect requests for URL `/netdata/` to one or more netdata servers. + +### proxy one netdata, running on the same server apache runs + +Add the following on top of any existing virtual host. It will allow you to access netdata as `http://virtual.host/netdata/`. + +``` +<IfModule mod_rewrite.c> + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On + + <Proxy *> + Require all granted + </Proxy> + + # Local netdata server accessed with '/netdata/', at localhost:19999 + ProxyPass "/netdata/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on + ProxyPassReverse "/netdata/" "http://localhost:19999/" + + # if the user did not give the trailing /, add it + # for HTTP (if the virtualhost is HTTP, use this) + RewriteRule ^/netdata$ http://%{HTTP_HOST}/netdata/ [L,R=301] + # for HTTPS (if the virtualhost is HTTPS, use this) + #RewriteRule ^/netdata$ https://%{HTTP_HOST}/netdata/ [L,R=301] + + # rest of virtual host config here +</IfModule> +``` + +### proxy multiple netdata running on multiple servers + +Add the following on top of any existing virtual host. It will allow you to access multiple netdata as `http://virtual.host/netdata/HOSTNAME/`, where `HOSTNAME` is the hostname of any other netdata server you have (to access the `localhost` netdata, use `http://virtual.host/netdata/localhost/`). <br/>
+ +``` +<IfModule mod_rewrite.c> + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On + + <Proxy *> + Require all granted + </Proxy> + + # proxy any host, on port 19999 + ProxyPassMatch "^/netdata/([A-Za-z0-9\._-]+)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on + + # make sure the user did not forget to add a trailing / + # for HTTP (if the virtualhost is HTTP, use this) + RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" http://%{HTTP_HOST}/netdata/$1/ [L,R=301] + # for HTTPS (if the virtualhost is HTTPS, use this) + RewriteRule "^/netdata/([A-Za-z0-9\._-]+)$" https://%{HTTP_HOST}/netdata/$1/ [L,R=301] + + # rest of virtual host config here +</IfModule> +``` + +> IMPORTANT<br/>
+> The above config allows your apache users to connect to port 19999 on any server on your network. + +If you want to control the servers your users can connect to, replace the `ProxyPassMatch` line with the following. This allows only `server1`, `server2`, `server3` and `server4`. + +``` + ProxyPassMatch "^/netdata/(server1|server2|server3|server4)/(.*)" "http://$1:19999/$2" connectiontimeout=5 timeout=30 keepalive=on +``` + +## netdata on a dedicated virtual host + +You can proxy netdata through apache, using a dedicated apache virtual host. + +Create a new apache site: + +```sh +nano /etc/apache2/sites-available/netdata.conf +``` + +with this content: + +``` +<VirtualHost *:80> + RewriteEngine On + ProxyRequests Off + ProxyPreserveHost On + + ServerName netdata.domain.tld + + <Proxy *> + Require all granted + </Proxy> + + ProxyPass "/" "http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on + ProxyPassReverse "/" "http://localhost:19999/" + + ErrorLog ${APACHE_LOG_DIR}/netdata-error.log + CustomLog ${APACHE_LOG_DIR}/netdata-access.log combined +</VirtualHost> +``` + +Enable the VirtualHost: + +```sh +sudo a2ensite netdata.conf && service apache2 reload +``` + +## Netdata proxy in Plesk +_Assuming the main goal is to make Netdata run over HTTPS._ +1. Make a subdomain for Netdata on which you enable and force HTTPS - You can use a free Let's Encrypt certificate +2. Go to "Apache & nginx Settings", and in the following section, add: +``` +RewriteEngine on +RewriteRule (.*) http://localhost:19999/$1 [P,L] +``` +3. Optional: If your server is remote, then just replace "localhost" with your actual hostname or IP, it just works. + +Repeat the operation for as many servers as you need. + + +## Enable Basic Auth + +If you wish to add authentication (user/password) to access your netdata, do the following: + +Install the package `apache2-utils`. On debian / ubuntu run `sudo apt-get install apache2-utils`. + +Then, generate a password for user `netdata`, using `htpasswd -c /etc/apache2/.htpasswd netdata` + +Modify the virtual host with these: + +``` + # replace the <Proxy *> section + <Proxy *> + Order deny,allow + Allow from all + </Proxy> + + # add a <Location /netdata/> section + <Location /netdata/> + AuthType Basic + AuthName "Protected site" + AuthUserFile /etc/apache2/.htpasswd + Require valid-user + Order deny,allow + Allow from all + </Location> +``` + +Specify `Location /` if netdata is running on a dedicated virtual host. + +Note: Changes are applied by reloading or restarting Apache. + +# Netdata configuration + +You might edit `/etc/netdata/netdata.conf` to optimize your setup a bit. To apply these changes, you need to restart netdata. + +## Response compression + +If you plan to use netdata exclusively via apache, you can gain some performance by preventing double compression of its output (netdata compresses its response, apache re-compresses it) by editing `/etc/netdata/netdata.conf` and setting: + +``` +[web] + enable gzip compression = no +``` + +Once you disable compression at netdata (and restart it), please verify you receive compressed responses from apache (it is important to receive compressed responses - the charts will be more snappy). + +## Limit direct access to netdata + +You would also need to instruct netdata to listen only on `localhost`, `127.0.0.1` or `::1`. + +``` +[web] + bind to = localhost +``` +or +``` +[web] + bind to = 127.0.0.1 +``` +or +``` +[web] + bind to = ::1 +``` + +--- + +You can also use a unix domain socket. <br/>
This will also provide a faster route between apache and netdata: + +``` +[web] + bind to = unix:/tmp/netdata.sock +``` +_note: netdata v1.8+ supports unix domain sockets_ + +At the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netdata.sock|`, like this: + +``` +ProxyPass "/netdata/" "unix:/tmp/netdata.sock|http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on +``` + +--- + +If your apache server is not on localhost, you can set: + +``` +[web] + bind to = * + allow connections from = IP_OF_APACHE_SERVER +``` +_note: netdata v1.9+ supports `allow connections from`_ + +`allow connections from` accepts [netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address. + +## prevent the double access.log + +apache logs accesses and netdata logs them too. You can prevent netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`: + +``` +[global] + access log = none +``` + +## Troubleshooting mod_proxy + +Make sure the requests reach netdata, by examining `/var/log/netdata/access.log`. + +1. if the requests do not reach netdata, your apache does not forward them. +2. if the requests reach netdata but the URLs are wrong, you have not re-written them properly. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-apache&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Running-behind-caddy.md b/docs/Running-behind-caddy.md new file mode 100644 index 000000000..1b25b0a2e --- /dev/null +++ b/docs/Running-behind-caddy.md @@ -0,0 +1,29 @@ +# Netdata via Caddy + +To run netdata via [Caddy's proxying](https://caddyserver.com/docs/proxy), set your Caddyfile up like this: + +``` +netdata.domain.tld { + proxy / localhost:19999 +} +``` + +Other directives can be added between the curly brackets as needed. + +To run netdata in a subfolder: + +``` +netdata.domain.tld { + proxy /netdata/ localhost:19999 { + without /netdata + } +} +``` + +## limit direct access to netdata + +You would also need to instruct netdata to listen only to `127.0.0.1` or `::1`. + +To limit access to netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-caddy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md new file mode 100644 index 000000000..5c74439ad --- /dev/null +++ b/docs/Running-behind-lighttpd.md @@ -0,0 +1,62 @@ +# Netdata via lighttpd v1.4.x + +Here is a config for accessing netdata in a suburl via lighttpd 1.4.46 and newer: + +```txt +$HTTP["url"] =~ "^/netdata/" { + proxy.server = ( "" => ("netdata" => ( "host" => "127.0.0.1", "port" => 19999 ))) + proxy.header = ( "map-urlpath" => ( "/netdata/" => "/") ) +} +``` + +If you have older lighttpd you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). <br/>
+ +```txt +$HTTP["url"] =~ "^/netdata/" { + proxy.server = ( "" => ("" => ( "host" => "127.0.0.1", "port" => 19998 ))) +} + +$SERVER["socket"] == ":19998" { + url.rewrite-once = ( "^/netdata(.*)$" => "/$1" ) + proxy.server = ( "" => ( "" => ( "host" => "127.0.0.1", "port" => 19999 ))) +} +``` + +--- + +If the only thing the server is exposing via the web is netdata (and thus no suburl rewriting required), +then you can get away with just +``` +proxy.server = ( "" => ( ( "host" => "127.0.0.1", "port" => 19999 ))) +``` +Though if it's public facing you might then want to put some authentication on it. htdigest support +looks like: +``` +auth.backend = "htdigest" +auth.backend.htdigest.userfile = "/etc/lighttpd/lighttpd.htdigest" +auth.require = ( "" => ( "method" => "digest", + "realm" => "netdata", + "require" => "valid-user" + ) + ) +``` +other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth). + +--- + +It seems that lighttpd (or some versions of it) fails to proxy compressed web responses. +To solve this issue, disable web response compression in netdata. + +Open `/etc/netdata/netdata.conf` and set in `[global]`: + +``` +enable web responses gzip compression = no +``` + +## limit direct access to netdata + +You would also need to instruct netdata to listen only to `127.0.0.1` or `::1`. + +To limit access to netdata only from localhost, set `bind socket to IP = 127.0.0.1` or `bind socket to IP = ::1` in `/etc/netdata/netdata.conf`. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-lighttpd&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md new file mode 100644 index 000000000..3918af243 --- /dev/null +++ b/docs/Running-behind-nginx.md @@ -0,0 +1,204 @@ +# Netdata via nginx + +To pass netdata via nginx, use this: + +### As a virtual host + +``` +upstream backend { + # the netdata server + server 127.0.0.1:19999; + keepalive 64; +} + +server { + # nginx listens to this + listen 80; + + # the virtual host name of this + server_name netdata.example.com; + + location / { + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://backend; + proxy_http_version 1.1; + proxy_pass_request_headers on; + proxy_set_header Connection "keep-alive"; + proxy_store off; + } +} +``` + +### As a subfolder to an existing virtual host + +``` +upstream netdata { + server 127.0.0.1:19999; + keepalive 64; +} + +server { + listen 80; + + # the virtual host name of this subfolder should be exposed + #server_name netdata.example.com; + + location = /netdata { + return 301 /netdata/; + } + + location ~ /netdata/(?<ndpath>.*) { + proxy_redirect off; + proxy_set_header Host $host; + + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_http_version 1.1; + proxy_pass_request_headers on; + proxy_set_header Connection "keep-alive"; + proxy_store off; + proxy_pass http://netdata/$ndpath$is_args$args; + + gzip on; + gzip_proxied any; + gzip_types *; + } +} +``` + +### As a subfolder for multiple netdata servers, via one nginx + +``` +upstream backend-server1 { + server <br/>
 10.1.1.103:19999; + keepalive 64; +} +upstream backend-server2 { + server 10.1.1.104:19999; + keepalive 64; +} + +server { + listen 80; + + # the virtual host name of this subfolder should be exposed + #server_name netdata.example.com; + + location ~ /netdata/(?<behost>.*)/(?<ndpath>.*) { + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_http_version 1.1; + proxy_pass_request_headers on; + proxy_set_header Connection "keep-alive"; + proxy_store off; + proxy_pass http://backend-$behost/$ndpath$is_args$args; + + gzip on; + gzip_proxied any; + gzip_types *; + } + + # make sure there is a trailing slash at the browser + # or the URLs will be wrong + location ~ /netdata/(?<behost>.*) { + return 301 /netdata/$behost/; + } +} +``` + +Of course you can add as many backend servers as you like. + +Using the above, you access netdata on the backend servers, like this: + +- `http://nginx.server/netdata/server1/` to reach `backend-server1` +- `http://nginx.server/netdata/server2/` to reach `backend-server2` + + +### Enable authentication + +Create an authentication file to enable the nginx basic authentication. +Do not use authentication without SSL/TLS! +If you don't have one, you can do the following: + +``` +printf "yourusername:$(openssl passwd -apr1)" > /etc/nginx/passwords +``` + +And enable the authentication inside your server directive: + +``` +server { + # ... + auth_basic "Protected"; + auth_basic_user_file passwords; + # ... +} +``` + +## limit direct access to netdata + +If your nginx is on `localhost`, you can use this to protect your netdata: + +``` +[web] + bind to = 127.0.0.1 ::1 +``` + +--- + +You can also use a unix domain socket. This will also provide a faster route between nginx and netdata: + +``` +[web] + bind to = unix:/tmp/netdata.sock +``` +_note: netdata v1.8+ supports unix domain sockets_ + +At the nginx side, use something like this to use the same unix domain socket: + +``` +upstream backend { + server unix:/tmp/netdata.sock; + keepalive 64; +} +``` + +--- + +If your nginx server is not on localhost, you can set: + +``` +[web] + bind to = * + allow connections from = IP_OF_NGINX_SERVER +``` + +_note: netdata v1.9+ supports `allow connections from`_ + +`allow connections from` accepts [netdata simple patterns](../libnetdata/simple_pattern/) to match against the connection IP address. + +## prevent the double access.log + +nginx logs accesses and netdata logs them too. You can prevent netdata from generating its access log, by setting this in `/etc/netdata/netdata.conf`: + +``` +[global] + access log = none +``` + +## SELinux + +If you get a 502 Bad Gateway error, you might check your nginx error log: + +```sh +# cat /var/log/nginx/error.log: +2016/09/09 12:34:05 [crit] 5731#5731: *1 connect() to 127.0.0.1:19999 failed (13: Permission denied) while connecting to upstream, client: 1.2.3.4, server: netdata.example.com, request: "GET / HTTP/2.0", upstream: "http://127.0.0.1:19999/", host: "netdata.example.com" +``` + +If you see something like the above, chances are high that SELinux prevents nginx from connecting to the backend server. To fix that, just use this policy: `setsebool -P httpd_can_network_connect true`. <br/>
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FRunning-behind-nginx&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/Third-Party-Plugins.md b/docs/Third-Party-Plugins.md
new file mode 100644
index 000000000..38fa90e4e
--- /dev/null
+++ b/docs/Third-Party-Plugins.md
@@ -0,0 +1,31 @@
+# Third-party plugins
+
+The following is a list of Netdata plugins distributed by third parties:
+
+## Nvidia GPUs
+
+The [netdata nv plugin](https://github.com/coraxx/netdata_nv_plugin) monitors Nvidia GPUs.
+
+![image](https://user-images.githubusercontent.com/2662304/29516895-351e905e-867b-11e7-9863-3fb6924490ab.png)
+
+## teamspeak 3
+
+The [teamspeak 3 plugin](https://github.com/coraxx/netdata_ts3_plugin) polls active users and bandwidth from TeamSpeak 3 servers.
+
+## SSH
+
+The [SSH module](https://github.com/Yaser-Amiri/netdata-ssh-module) monitors failed authentication requests of an SSH server.
+
+## interactive users count
+
+This module collects the [number of currently logged-on users](https://github.com/veksh/netdata-numsessions).
+
+## CyberPower UPS
+
+The [cyberups plugin](https://github.com/HawtDogFlvrWtr/netdata_cyberpwrups_plugin) polls a USB-connected CyberPower UPS for stats.
+
+## Nim
+
+There is an unofficial [nim plugin helper](https://github.com/FedericoCeratto/nim-netdata-plugin).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FThird-Party-Plugins&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/a-github-star-is-important.md b/docs/a-github-star-is-important.md
new file mode 100644
index 000000000..e46d56449
--- /dev/null
+++ b/docs/a-github-star-is-important.md
@@ -0,0 +1,15 @@
+# A GitHub star is important
+
+**GitHub stars** allow netdata to expand its reach and its community, and especially to attract skilled people willing to contribute to it.
+
+Compared to its first release, netdata is now **twice as fast**, has settled its known bugs and offers a lot more functionality. This happened because a lot of people find it useful, use it daily at home and work, **rely on it** and **contribute to it**.
+
+**GitHub stars** also **motivate** us. They state that you find our work **useful**. They give us strength to continue, to work **harder** to make it even **better**.
+
+So, give netdata a **GitHub star**, at the top right of this page.
+
+Thank you!
+
+Costa Tsaousis
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fa-github-star-is-important&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md
new file mode 100644
index 000000000..1e426e2c5
--- /dev/null
+++ b/docs/anonymous-statistics.md
@@ -0,0 +1,62 @@
+# Anonymous Statistics
+
+From Netdata v1.12 and above, anonymous usage information is collected by default and sent to Google Analytics.
+The statistics calculated from this information are used for:
+
+1. **Quality assurance**, to help us understand if netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
+
+2. **Usage statistics**, to help us focus on the parts of netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
+
+Information is sent to Netdata via two different channels:
+- Google Tag Manager is used when an agent's dashboard is accessed.
+- The script `anonymous-statistics.sh` is executed by the Netdata daemon, when Netdata starts, stops cleanly, or fails.
+
+Both methods are controlled via the same [opt-out mechanism](#opt-out).
+
+## Google tag manager
+
+Google tag manager (GTM) is the recommended way of collecting statistics for new implementations using GA. Unlike the older API, the logic of when to send information to GA and what information to send is controlled centrally.
+
+We have configured GTM to trigger the tag only when the variable `anonymous_statistics` is true. The value of this variable is controlled via the [opt-out mechanism](#opt-out).
+
+To ensure anonymity of the stored information, we have configured GTM's GA variable "Fields to set" as follows:
+
+|Field Name|Value
+|---|---
+|page|netdata-dashboard
+|hostname|dashboard.my-netdata.io
+|anonymizeIp|true
+|title|netdata dashboard
+|campaignSource|{{machine_guid}}
+|campaignMedium|web
+|referrer|http://dashboard.my-netdata.io
+|Page URL|http://dashboard.my-netdata.io/netdata-dashboard
+|Page Hostname|http://dashboard.my-netdata.io
+|Page Path|/netdata-dashboard
+|location|http://dashboard.my-netdata.io
+
+In addition, the netdata-generated unique machine guid is sent to GA via a custom dimension.
+You can verify the effect of these settings by examining the GA `collect` request parameters.
+
+The only thing that is impossible for us to prevent from being **sent** is the URL in the "Referrer" header of the browser request to GA. However, the settings above ensure that all **stored** URLs and host names are anonymized.
+
+## Anonymous Statistics Script
+
+Every time the daemon is started or stopped, and every time a fatal condition is encountered, netdata uses the anonymous statistics script to collect system information and send it to GA via an HTTP call. The information collected for all events is:
+ - Netdata version
+ - OS name, version, id, id_like
+ - Kernel name, version, architecture
+ - Virtualization technology
+ - Containerization technology
+
+Furthermore, the FATAL event sends the Netdata process & thread name, along with the source code function, source code filename and source code line number of the fatal error.
+
+To see exactly what is collected and how, you can review the script template `daemon/anonymous-statistics.sh.in`. The template is converted to a bash script called `anonymous-statistics.sh`, installed under the Netdata `plugins directory`, which is usually `/usr/libexec/netdata/plugins.d`.
+
+## Opt-Out
+
+To opt out from sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`). The effect of creating the file is the following:
+- The daemon will never execute the anonymous statistics script
+- The anonymous statistics script will exit immediately if called in any other way (e.g. from a shell)
+- The Google Tag Manager Javascript snippet will remain in the page, but the linked tag will not be fired. The effect is that no data will ever be sent to GA.
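+
+For example, on a default installation the opt-out file can be created like this (the path is the usual default and may differ on your system):
+
+```sh
+# opt out of anonymous statistics collection
+sudo touch /etc/netdata/.opt-out-from-anonymous-statistics
+```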
+ diff --git a/docs/configuration-guide.md b/docs/configuration-guide.md new file mode 100644 index 000000000..4c82c0583 --- /dev/null +++ b/docs/configuration-guide.md @@ -0,0 +1,122 @@ +# Configuration guide + +No configuration is required to run netdata, but you will find plenty of options to tweak, so that you can adapt it to your particular needs. + +
Configuration files are placed in `/etc/netdata`.
+Depending on your installation method, Netdata will have been installed either directly under `/`, or under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well (e.g. `/etc/netdata` becomes `/opt/netdata/etc/netdata`).
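+
+A quick way to tell which layout you have, assuming one of the two default locations was used:
+
+```sh
+# whichever of the two directories exists is your configuration directory
+ls -d /etc/netdata 2>/dev/null || ls -d /opt/netdata/etc/netdata
+```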
+
+Under that directory you will see the following:
+
+- `netdata.conf` is [the main configuration file](../daemon/config/#daemon-configuration)
+- `edit-config` is a shell script that you can use to easily and safely edit the configuration. Just run it to see its usage.
+- Other directories, initially empty, where your custom configurations for alarms and collector plugins/modules will be copied from the stock configuration, if and when you customize them using `edit-config`.
+- `orig` is a symbolic link to the directory `/usr/lib/netdata/conf.d`, which contains the stock configurations for everything not included in `netdata.conf`:
+   - `health_alarm_notify.conf` is where you configure how and to whom Netdata will send [alarm notifications](../health/notifications/#netdata-alarm-notifications).
+   - `health.d` is the directory that contains the alarm triggers for [health monitoring](../health/#health-monitoring). It contains one `.conf` file per collector.
+   - The [modular plugin orchestrators](../collectors/plugins.d/#external-plugins-overview) have:
+     - One config file each, mainly to turn their modules on and off: `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin), `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin) and `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin) modules.
+     - One directory each, where the module-specific configuration files can be found.
+   - `stream.conf` is where you configure [streaming and replication](../streaming/#streaming-and-replication)
+   - `stats.d` is a directory under which you can add `.conf` files to add [synthetic charts](../collectors/statsd.plugin/#synthetic-statsd-charts).
+   - Individual collector plugin config files, such as `fping.conf` for the [fping plugin](../collectors/fping.plugin/) and `apps_groups.conf` for the [apps plugin](../collectors/apps.plugin/)
+
+So there are many configuration files to control every aspect of Netdata's behavior. It can be overwhelming at first, but you won't have to deal with any of them unless you have specific things you need to change. The following HOWTO will guide you on how to customize your netdata, based on what you want to do.
+
+## How to
+
+### Change what I see
+
+##### Increase the metrics retention period
+
+Increase `history` in [netdata.conf [global]](../daemon/config/#global-section-options). Just ensure you understand [how much memory will be required](../database/).
+
+##### Reduce the data collection frequency
+
+Increase `update every` in [netdata.conf [global]](../daemon/config/#global-section-options). This is another way to increase your metrics retention period, but at a lower resolution than the default 1s.
+
+##### Modify how a chart is displayed
+
+In `netdata.conf` under `# Per chart configuration` you will find several [[CHART_NAME] sections](../daemon/config/#per-chart-configuration), where you can control all aspects of a specific chart.
+
+##### Disable a collector
+
+Entire plugins can be turned off from the [netdata.conf [plugins]](../daemon/config/#plugins-section-options) section, as in the sketch below.
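+
+For example, a whole plugin orchestrator and all its modules can be disabled with a single line there (the plugin name is illustrative; the section lists every available plugin):
+
+```
+[plugins]
+    # disable the bash-based orchestrator and all its modules
+    charts.d = no
+```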
+
+To disable specific modules of a plugin orchestrator instead, you need to edit one of the following:
+- `python.d.conf` for [python](../collectors/python.d.plugin/#pythondplugin)
+- `node.d.conf` for [nodejs](../collectors/node.d.plugin/#nodedplugin)
+- `charts.d.conf` for [bash](../collectors/charts.d.plugin/#chartsdplugin)
+
+### Modify alarms and notifications
+
+##### Add a new alarm
+
+You can add a new alarm definition either by editing an existing stock alarm config file under `health.d` (e.g. `/etc/netdata/edit-config health.d/load.conf`), or by adding a new `.conf` file under `/etc/netdata/health.d`. The documentation on how to define an alarm is in [health monitoring](../health/#health-monitoring). It is suggested to look at some of the stock alarm definitions, so you can ensure you understand how the various options work.
+
+##### Turn off all alarms and notifications
+
+Just set `enabled = no` in the [netdata.conf [health]](../daemon/config/#health-section-options) section.
+
+##### Modify or disable a specific alarm
+
+The `health.d` directory contains the alarm triggers for [health monitoring](../health/#health-monitoring), with one `.conf` file per collector. You can easily find the `.conf` file you need to modify by looking for the "source" line in the table that appears on the right side of an alarm in the netdata GUI.
+
+For example, if you click on Alarms and go to the tab 'All', the default netdata installation will show you at the top the configured alarm for `10 min cpu usage` (it's the name of the badge). Looking at the table on the right side, you will see a row that says: `source 4@/usr/lib/netdata/conf.d/health.d/cpu.conf`. This way, you know that you will need to run `/etc/netdata/edit-config health.d/cpu.conf` and look for the alarm at line 4 of the `.conf` file.
+
+As stated at the top of the `.conf` file, **you can disable an alarm notification by setting the 'to' line to: silent**.
+To modify how the alarm gets triggered, we suggest that you go through the guide on [health monitoring](../health/#health-monitoring).
+
+##### Receive notifications using my preferred method
+
+You only need to configure `health_alarm_notify.conf`. To learn how to do it, first read [alarm notifications](../health/notifications/#netdata-alarm-notifications) and then open the submenu `Supported Notifications` under `Alarm notifications` in the documentation to find the specific page on your preferred notification method.
+
+### Make security-related customizations
+
+##### Change the netdata web server access lists
+
+You have several options under the [netdata.conf [web]](../web/server/#access-lists) section.
+
+##### Stop sending info to registry.my-netdata.io
+
+You will need to configure the `[registry]` section in `netdata.conf`. First read the [registry documentation](../registry/). It includes instructions on how to [run your own registry](../registry/#run-your-own-registry).
+
+##### Change the IP address/port netdata listens to
+
+The settings are under the `netdata.conf` `[web]` section. Look at the [web server documentation](../web/server/#binding-netdata-to-multiple-ports) for more info, or see the sketch below.
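+
+A minimal sketch of such a change (the IP and port shown are illustrative):
+
+```
+[web]
+    # listen on a specific interface and a non-default port
+    bind to = 10.1.1.1:19998
+```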
+
+### System resource usage
+
+##### Reduce the resources netdata uses
+
+The page on [netdata performance](Performance.md) has an excellent guide on how to reduce the netdata cpu/disk/RAM utilization to levels suitable even for the weakest [IoT devices](netdata-for-IoT.md).
+
+##### Change when netdata saves metrics to disk
+
+[netdata.conf [global]](../daemon/config/#global-section-options) : `memory mode`
+
+##### Prevent netdata from getting immediately killed when my server runs out of memory
+
+You can change the netdata [OOM score](../daemon/#oom-score) in netdata.conf [global].
+
+### Other
+
+##### Move netdata directories
+
+The various directory paths are in [netdata.conf [global]](../daemon/config/#global-section-options).
+
+
+## How netdata configuration works
+
+The configuration files are `name = value` dictionaries with `[sections]`. Write whatever you like there as long as it follows this simple format.
+
+Netdata loads this dictionary and then, when the code needs a value from it, it just looks up the `name` in the dictionary at the proper `section`. In all places in the code there are both the `names` and their `default values`, so if something is not found in the configuration file, the default is used. The lookup is made using B-Trees and hashes (no string comparisons), so it is super fast. Also, the `names` of the settings can be `my super duper setting that once set to yes, will turn the world upside down = no` - so goodbye to most of the documentation involved.
+
+Next, netdata can generate a valid configuration for the user to edit. No need to remember anything. Just get the configuration from the server (`/netdata.conf` on your netdata server), edit it and save it.
+
+Last, what about options you believe you have set, but you misspelled? When you get the configuration file from the server, there will be a comment above all `name = value` pairs the server does not use. So you know that whatever you wrote there is not used.
+
+## Netdata simple patterns
+
+Unix prefers regular expressions. But they are just too hard, too cryptic to write, use and understand.
+
+So, netdata supports [simple patterns](../libnetdata/simple_pattern/).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfiguration-guide&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/generator/buildhtml.sh b/docs/generator/buildhtml.sh
new file mode 100755
index 000000000..3cc87d29f
--- /dev/null
+++ b/docs/generator/buildhtml.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# buildhtml.sh
+
+# Builds the html static site, using mkdocs
+
+set -e
+
+# Assumes that the script is executed either from the htmldoc folder (by netlify), or from the root repo dir (as originally intended)
+currentdir=$(pwd | awk -F '/' '{print $NF}')
+echo "$currentdir"
+if [ "$currentdir" = "generator" ]; then
+    cd ../..
+fi
+GENERATOR_DIR="docs/generator"
+
+# Copy all netdata .md files to docs/generator/src. Exclude htmldoc itself and also the directory node_modules generated by Netlify
+echo "Copying files"
+rm -rf ${GENERATOR_DIR}/src
+find . 
-type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o -name "*.md" -print | cpio -pd ${GENERATOR_DIR}/src + +# Copy netdata html resources +cp -a ./${GENERATOR_DIR}/custom ./${GENERATOR_DIR}/src/ + +# Modify the first line of the main README.md, to enable proper static html generation +echo "Modifying README header" +sed -i -e '0,/# netdata /s//# Introduction\n\n/' ${GENERATOR_DIR}/src/README.md + +# Remove all GA tracking code +find ${GENERATOR_DIR}/src -name "*.md" -print0 | xargs -0 sed -i -e 's/\[!\[analytics.*UA-64295674-3)\]()//g' + +# Remove specific files that don't belong in the documentation +declare -a EXCLUDE_LIST=( + "HISTORICAL_CHANGELOG.md" + "contrib/sles11/README.md" + "packaging/maintainers/README.md" +) + +for f in "${EXCLUDE_LIST[@]}"; do + rm "${GENERATOR_DIR}/src/$f" +done + +echo "Creating mkdocs.yaml" + +# Generate mkdocs.yaml +${GENERATOR_DIR}/buildyaml.sh >${GENERATOR_DIR}/mkdocs.yml + +echo "Fixing links" + +# Fix links (recursively, all types, executing replacements) +${GENERATOR_DIR}/checklinks.sh -rax + +if [ "${1}" != "nomkdocs" ] ; then + echo "Calling mkdocs" + + # Build html docs + mkdocs build --config-file=${GENERATOR_DIR}/mkdocs.yml +fi + +echo "Finished" diff --git a/docs/generator/buildyaml.sh b/docs/generator/buildyaml.sh new file mode 100755 index 000000000..a86b1392e --- /dev/null +++ b/docs/generator/buildyaml.sh @@ -0,0 +1,238 @@ +#!/bin/bash + +GENERATOR_DIR="docs/generator" +cd ${GENERATOR_DIR}/src + +# create yaml nav subtree with all the files directly under a specific directory +# arguments: +# tabs - how deep do we show it in the hierarchy. Level 1 is the top level, max should probably be 3 +# directory - to get mds from to add them to the yaml +# file - can be left empty to include all files +# name - what do we call the relevant section on the navbar. Empty if no new section is required +# maxdepth - how many levels of subdirectories do I include in the yaml in this section. 1 means just the top level and is the default if left empty +# excludefirstlevel - Optional param. 
If passed, mindepth is set to 2, to exclude the READMEs in the first directory level + +navpart() { + tabs=$1 + dir=$2 + file=$3 + section=$4 + maxdepth=$5 + excludefirstlevel=$6 + spc="" + + i=1 + while [ ${i} -lt ${tabs} ]; do + spc=" $spc" + i=$((i + 1)) + done + + if [ -z "$file" ]; then file='*'; fi + if [[ -n $section ]]; then echo "$spc- ${section}:"; fi + if [ -z "$maxdepth" ]; then maxdepth=1; fi + if [[ -n $excludefirstlevel ]]; then mindepth=2; else mindepth=1; fi + + for f in $(find $dir -mindepth $mindepth -maxdepth $maxdepth -name "${file}.md" -printf '%h\0%d\0%p\n' | sort -t '\0' -n | awk -F '\0' '{print $3}'); do + # If I'm adding a section, I need the child links to be one level deeper than the requested level in "tabs" + if [ -z "$section" ]; then + echo "$spc- '$f'" + else + echo "$spc - '$f'" + fi + done +} + +echo -e 'site_name: Netdata Documentation +repo_url: https://github.com/netdata/netdata +repo_name: GitHub +edit_uri: blob/master +site_description: Netdata Documentation +copyright: Netdata, 2018 +docs_dir: src +site_dir: build +#use_directory_urls: false +strict: true +extra: + social: + - type: "github" + link: "https://github.com/netdata/netdata" + - type: "twitter" + link: "https://twitter.com/linuxnetdata" + - type: "facebook" + link: "https://www.facebook.com/linuxnetdata/" +theme: + name: "material" + custom_dir: custom/themes/material + favicon: custom/img/favicon.ico +extra_css: + - "https://cdnjs.cloudflare.com/ajax/libs/cookieconsent2/3.1.0/cookieconsent.min.css" + - "custom/css/netdata.css" +extra_javascript: + - "custom/javascripts/cookie-consent.js" + - "https://cdnjs.cloudflare.com/ajax/libs/cookieconsent2/3.1.0/cookieconsent.min.js" +markdown_extensions: + - extra + - abbr + - attr_list + - def_list + - fenced_code + - footnotes + - tables + - admonition + - codehilite + - meta + - nl2br + - sane_lists + - smarty + - toc: + permalink: True + separator: "-" + - wikilinks + - pymdownx.arithmatex + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.critic + - pymdownx.details + - pymdownx.inlinehilite + - pymdownx.magiclink + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - pymdownx.betterem + - pymdownx.superfences + - markdown.extensions.footnotes + - markdown.extensions.attr_list + - markdown.extensions.def_list + - markdown.extensions.tables + - markdown.extensions.abbr + - pymdownx.extrarawhtml +nav:' + +navpart 1 . 
README "About" + +echo -ne " - 'docs/Demo-Sites.md' + - 'docs/netdata-security.md' + - 'docs/anonymous-statistics.md' + - 'docs/Donations-netdata-has-received.md' + - 'docs/a-github-star-is-important.md' + - REDISTRIBUTED.md + - CHANGELOG.md + - CONTRIBUTING.md +- Why Netdata: + - 'docs/why-netdata/README.md' + - 'docs/why-netdata/1s-granularity.md' + - 'docs/why-netdata/unlimited-metrics.md' + - 'docs/why-netdata/meaningful-presentation.md' + - 'docs/why-netdata/immediate-results.md' +- Installation: + - 'packaging/installer/README.md' + - 'packaging/docker/README.md' + - 'packaging/installer/UPDATE.md' + - 'packaging/installer/UNINSTALL.md' +- 'docs/GettingStarted.md' +- Running netdata: + - 'daemon/README.md' + - 'docs/configuration-guide.md' + - 'daemon/config/README.md' + - 'docs/Charts.md' +" +navpart 2 web/server "" "Web server" +navpart 3 web/server "" "" 2 excludefirstlevel +echo -ne " - Running behind another web server: + - 'docs/Running-behind-nginx.md' + - 'docs/Running-behind-apache.md' + - 'docs/Running-behind-lighttpd.md' + - 'docs/Running-behind-caddy.md' +" +#navpart 2 system +navpart 2 database +navpart 2 registry + +echo -ne " - 'docs/Performance.md' + - 'docs/netdata-for-IoT.md' + - 'docs/high-performance-netdata.md' +" + +navpart 1 collectors "" "Data collection" 1 +echo -ne " - 'docs/Add-more-charts-to-netdata.md' + - Internal plugins: +" +navpart 3 collectors/apps.plugin +navpart 3 collectors/proc.plugin +navpart 3 collectors/statsd.plugin +navpart 3 collectors/cgroups.plugin +navpart 3 collectors/idlejitter.plugin +navpart 3 collectors/tc.plugin +navpart 3 collectors/nfacct.plugin +navpart 3 collectors/checks.plugin +navpart 3 collectors/diskspace.plugin +navpart 3 collectors/freebsd.plugin +navpart 3 collectors/macos.plugin + +navpart 2 collectors/plugins.d "" "External plugins" +navpart 3 collectors/python.d.plugin "" "Python modules" 3 +navpart 3 collectors/node.d.plugin "" "Node.js modules" 3 +echo -ne " - BASH modules: + - 'collectors/charts.d.plugin/README.md' + - 'collectors/charts.d.plugin/ap/README.md' + - 'collectors/charts.d.plugin/apcupsd/README.md' + - 'collectors/charts.d.plugin/example/README.md' + - 'collectors/charts.d.plugin/libreswan/README.md' + - 'collectors/charts.d.plugin/nut/README.md' + - 'collectors/charts.d.plugin/opensips/README.md' + - Obsolete BASH modules: + - 'collectors/charts.d.plugin/mem_apps/README.md' + - 'collectors/charts.d.plugin/postfix/README.md' + - 'collectors/charts.d.plugin/tomcat/README.md' + - 'collectors/charts.d.plugin/sensors/README.md' + - 'collectors/charts.d.plugin/cpu_apps/README.md' + - 'collectors/charts.d.plugin/squid/README.md' + - 'collectors/charts.d.plugin/nginx/README.md' + - 'collectors/charts.d.plugin/hddtemp/README.md' + - 'collectors/charts.d.plugin/cpufreq/README.md' + - 'collectors/charts.d.plugin/mysql/README.md' + - 'collectors/charts.d.plugin/exim/README.md' + - 'collectors/charts.d.plugin/apache/README.md' + - 'collectors/charts.d.plugin/load_average/README.md' + - 'collectors/charts.d.plugin/phpfpm/README.md' +" + +navpart 3 collectors/fping.plugin +navpart 3 collectors/freeipmi.plugin +navpart 3 collectors/cups.plugin + +echo -ne " - 'docs/Third-Party-Plugins.md' +" + +navpart 1 health README "Alarms and notifications" +navpart 2 health/notifications "" "" 1 +navpart 2 health/notifications "" "Supported notifications" 2 excludefirstlevel + +navpart 1 streaming "" "" 4 + +navpart 1 backends "" "Archiving to backends" 3 + +navpart 1 web "README" "Dashboards" +navpart 2 web/gui "" "" 3 + 
+navpart 1 web/api "" "HTTP API" +navpart 2 web/api/exporters "" "Exporters" 2 +navpart 2 web/api/formatters "" "Formatters" 2 +navpart 2 web/api/badges "" "" 2 +navpart 2 web/api/health "" "" 2 +navpart 2 web/api/queries "" "Queries" 2 + +echo -ne "- Hacking netdata: + - CODE_OF_CONDUCT.md + - 'docs/Netdata-Security-and-Disclosure-Information.md' + - CONTRIBUTORS.md +" +navpart 2 packaging/makeself "" "" 4 +navpart 2 libnetdata "" "libnetdata" 4 +navpart 2 contrib +navpart 2 tests "" "" 2 +navpart 2 diagrams/data_structures diff --git a/docs/generator/checklinks.sh b/docs/generator/checklinks.sh new file mode 100755 index 000000000..d0c3b165c --- /dev/null +++ b/docs/generator/checklinks.sh @@ -0,0 +1,394 @@ +#!/bin/bash +# shellcheck disable=SC2181 + +# Doc link checker +# Validates and tries to fix all links that will cause issues either in the repo, or in the html site + +GENERATOR_DIR="docs/generator" + +dbg () { + if [ "$VERBOSE" -eq 1 ] ; then printf "%s\\n" "${1}" ; fi +} + +printhelp () { + echo "Usage: docs/generator/checklinks.sh [-r OR -f ] [OPTIONS] + -r Recursively check all mds in all child directories, except docs/generator and node_modules (which is generatord by netlify) + -f Just check the passed md file + General Options: + -x Execute commands. By default the script runs in test mode with no files changed by the script (results and fixes are just shown). Use -x to have it apply the changes. + -u trys to follow URLs using curl + -v Outputs debugging messages + By default, nothing is actually checked. The following options tell it what to check: + -a Check all link types + -w Check wiki links (and just warn if you see one) + -b Check absolute links to the netdata repo (and change them to relative). Only checks links to https://github.com/netdata/netdata/????/master* + -l Check relative links to the netdata repo (and replace them with links that the html static site can live with, under docs/generator/src only) + -e Check external links, outside the wiki or the repo (useless without adding the -u option, to verify that they're not broken) + " +} + +fix () { + if [ "$EXECUTE" -eq 0 ] ; then + echo "-- SHOULD EXECUTE: $1" + else + dbg "-- EXECUTING: $1" + eval "$1" + fi +} + +ck_netdata_absolute () { + f=$1 + alnk=$2 + lnkinfile=$3 + testURL "$alnk" + + if [[ $f =~ ^(.*)/([^/]*)$ ]] ; then + fpath="${BASH_REMATCH[1]}" + dbg "-- Current file is at $fpath" + fi + + if [ $? -eq 0 ] ; then + rlnk=$(echo "$alnk" | sed 's/https:\/\/github.com\/netdata\/netdata\/....\/master\///g') + case $rlnk in + \#* ) dbg "-- (#somelink)" ;; + */ ) dbg "-- # (path/)" ;; + */#* ) dbg "-- # (path/#somelink)" ;; + */*.md ) dbg "-- # (path/filename.md)" ;; + */*.md#* ) dbg "-- # (path/filename.md#somelink)" ;; + *#* ) + dbg "-- # (path#somelink) -> (path/#somelink)" + if [[ $rlnk =~ ^(.*)#(.*)$ ]] ; then + dbg "-- $rlnk -> ${BASH_REMATCH[1]}/#${BASH_REMATCH[2]}" + rlnk="${BASH_REMATCH[1]}/#${BASH_REMATCH[2]}" + fi + ;; + * ) + if [ -f "$rlnk" ] ; then + dbg "-- # (path/someotherfile) $rlnk" + else + if [ -d "$rlnk" ] ; then + dbg "-- # (path) -> (path/)" + rlnk="$rlnk/" + else + echo "-- ERROR: $f - $alnk is neither a file nor a directory. Giving up!" + EXITCODE=1 + return + fi + fi + ;; + esac + + if [[ $rlnk =~ ^(.*)/([^/]*)$ ]] ; then + abspath="${BASH_REMATCH[1]}" + rest="${BASH_REMATCH[2]}" + dbg "-- Target file is at $abspath" + fi + relativelink=$(realpath --relative-to="$fpath" "$abspath") + if [ $? -eq 0 ] ; then + srch=$(echo "$lnkinfile" | sed 's/\//\\\//g') + if [ "$relativelink" = "." 
] ; then + rplc=$(echo "$rest" | sed 's/\//\\\//g') + else + rplc=$(echo "$relativelink/$rest" | sed 's/\//\\\//g') + fi + fix "sed -i 's/($srch)/($rplc)/g' $f" + else + echo "-- ERROR: $f - Can't determine relative path of $alnk" + fi + else + echo "-- ERROR: $f - $alnk is a broken link" + EXITCODE=1 + return + fi +} + +testURL () { + if [ "$TESTURLS" -eq 0 ] ; then return 0 ; fi + dbg "-- Testing URL $1" + curl -sS "$1" > /dev/null + if [ $? -gt 0 ] ; then + return 1 + fi + return 0 +} + +testinternal () { + # Check if the header referred to by the internal link exists in the same file + ff=${1} + ifile=${2} + ilnk=${3} + header=${ilnk//-/} + dbg "-- Searching for \"$header\" in $ifile" + tr -d '[],_.:? `'< "$ifile" | sed 's/-//g' | grep -i "^\\#*$header\$" >/dev/null + if [ $? -eq 0 ] ; then + dbg "-- $ilnk found in $ifile" + return 0 + else + echo "-- ERROR: $ff - $ilnk header not found in file $ifile" + EXITCODE=1 + return 1 + fi +} + +testf () { + sf=$1 + tf=$2 + + if [ -f "$tf" ] ; then + dbg "-- $tf exists" + return 0 + else + echo "-- ERROR: $sf - $tf does not exist" + EXITCODE=1 + return 1 + fi +} + +ck_netdata_relative () { + f=${1} + rlnk=${2} + dbg "-- Checking relative link $rlnk" + fpath="." + fname="$f" + # First ensure that the link works in the repo, then try to fix it in htmldocs + if [[ $f =~ ^(.*)/([^/]*)$ ]] ; then + fpath="${BASH_REMATCH[1]}" + fname="${BASH_REMATCH[2]}" + dbg "-- Current file is at $fpath" + else + dbg "-- Current file is at root directory" + fi + # Cases to handle: + # (#somelink) + # (path/) + # (path/#somelink) + # (path/filename.md) -> htmldoc (path/filename/) + # (path/filename.md#somelink) -> htmldoc (path/filename/#somelink) + # (path#somelink) -> htmldoc (path/#somelink) + # (path/someotherfile) -> htmldoc (absolutelink) + # (path) -> htmldoc (path/) + + TRGT="" + s="" + + case "$rlnk" in + \#* ) + dbg "-- # (#somelink)" + testinternal "$f" "$f" "$rlnk" + ;; + */ ) + dbg "-- # (path/)" + TRGT="$fpath/${rlnk}README.md" + testf "$f" "$TRGT" + if [ $? -eq 0 ] ; then + if [ "$fname" != "README.md" ] ; then s="../$rlnk"; fi + fi + ;; + */\#* ) + dbg "-- # (path/#somelink)" + if [[ $rlnk =~ ^(.*)/#(.*)$ ]] ; then + TRGT="$fpath/${BASH_REMATCH[1]}/README.md" + LNK="#${BASH_REMATCH[2]}" + dbg "-- Look for $LNK in $TRGT" + testf "$f" "$TRGT" + if [ $? -eq 0 ] ; then + testinternal "$f" "$TRGT" "$LNK" + if [ $? -eq 0 ] ; then + if [ "$fname" != "README.md" ] ; then s="../$rlnk"; fi + fi + fi + fi + ;; + *.md ) + dbg "-- # (path/filename.md) -> htmldoc (path/filename/)" + testf "$f" "$fpath/$rlnk" + if [ $? -eq 0 ] ; then + if [[ $rlnk =~ ^(.*)/(.*).md$ ]] ; then + if [ "${BASH_REMATCH[2]}" = "README" ] ; then + s="../${BASH_REMATCH[1]}/" + else + s="../${BASH_REMATCH[1]}/${BASH_REMATCH[2]}/" + fi + if [ "$fname" != "README.md" ] ; then s="../$s"; fi + fi + fi + ;; + *.md\#* ) + dbg "-- # (path/filename.md#somelink) -> htmldoc (path/filename/#somelink)" + if [[ $rlnk =~ ^(.*)#(.*)$ ]] ; then + TRGT="$fpath/${BASH_REMATCH[1]}" + LNK="#${BASH_REMATCH[2]}" + testf "$f" "$TRGT" + if [ $? -eq 0 ] ; then + testinternal "$f" "$TRGT" "$LNK" + if [ $? 
-eq 0 ] ; then + if [[ $lnk =~ ^(.*)/(.*).md#(.*)$ ]] ; then + if [ "${BASH_REMATCH[2]}" = "README" ] ; then + s="../${BASH_REMATCH[1]}/#${BASH_REMATCH[3]}" + else + s="../${BASH_REMATCH[1]}/${BASH_REMATCH[2]}/#${BASH_REMATCH[3]}" + fi + if [ "$fname" != "README.md" ] ; then s="../$s"; fi + fi + fi + fi + fi + ;; + *\#* ) + dbg "-- # (path#somelink) -> (path/#somelink)" + if [[ $rlnk =~ ^(.*)#(.*)$ ]] ; then + TRGT="$fpath/${BASH_REMATCH[1]}/README.md" + LNK="#${BASH_REMATCH[2]}" + testf "$f" "$TRGT" + if [ $? -eq 0 ] ; then + testinternal "$f" "$TRGT" "$LNK" + if [ $? -eq 0 ] ; then + if [[ $rlnk =~ ^(.*)#(.*)$ ]] ; then + s="${BASH_REMATCH[1]}/#${BASH_REMATCH[2]}" + if [ "$fname" != "README.md" ] ; then s="../$s"; fi + fi + fi + fi + fi + ;; + * ) + if [ -f "$fpath/$rlnk" ] ; then + dbg "-- # (path/someotherfile) $rlnk" + if [ "$fpath" = "." ] ; then + s="https://github.com/netdata/netdata/tree/master/$rlnk" + else + s="https://github.com/netdata/netdata/tree/master/$fpath/$rlnk" + fi + else + if [ -d "$fpath/$rlnk" ] ; then + dbg "-- # (path) -> htmldoc (path/)" + testf "$f" "$fpath/$rlnk/README.md" + if [ $? -eq 0 ] ; then + s="$rlnk/" + if [ "$fname" != "README.md" ] ; then s="../$s"; fi + fi + else + echo "-- ERROR: $f - $rlnk is neither a file or a directory. Giving up!" + EXITCODE=1 + fi + fi + ;; + esac + + if [[ ! -z $s ]] ; then + srch=$(echo "$rlnk" | sed 's/\//\\\//g') + rplc=$(echo "$s" | sed 's/\//\\\//g') + fix "sed -i 's/($srch)/($rplc)/g' $GENERATOR_DIR/src/$f" + fi +} + + +checklinks () { + f=$1 + dbg "Checking $f" + while read -r l ; do + for word in $l ; do + if [[ $word =~ .*\]\(([^\(\) ]*)\).* ]] ; then + lnk="${BASH_REMATCH[1]}" + if [ -z "$lnk" ] ; then continue ; fi + dbg "-$lnk" + case "$lnk" in + mailto:* ) dbg "-- Mailto link, ignoring" ;; + https://github.com/netdata/netdata/wiki* ) + dbg "-- Wiki Link $lnk" + if [ "$CHKWIKI" -eq 1 ] ; then echo "-- WARNING: $f - $lnk points to the wiki. Please replace it manually" ; fi + ;; + https://github.com/netdata/netdata/????/master* ) + dbg "-- Absolute link $lnk" + if [ "$CHKABSOLUTE" -eq 1 ] ; then ck_netdata_absolute "$f" "$lnk" "$lnk" ; fi + ;; + http* ) + dbg "-- External link $lnk" + if [ "$CHKEXTERNAL" -eq 1 ] ; then + testURL "$lnk" + if [ $? -eq 1 ] ; then + echo "-- ERROR: $f - $lnk is a broken link" + EXITCODE=1 + fi + fi + ;; + * ) + dbg "-- Relative link $lnk" + if [ "$CHKRELATIVE" -eq 1 ] ; then ck_netdata_relative "$f" "$lnk" ; fi + ;; + esac + fi + done + done < "$f" +} + +TESTURLS=0 +VERBOSE=0 +RECURSIVE=0 +EXECUTE=0 +CHKWIKI=0 +CHKABSOLUTE=0 +CHKEXTERNAL=0 +CHKRELATIVE=0 +while getopts :f:rxuvwbela option +do + case "$option" in + f) + file=$OPTARG + ;; + r) + RECURSIVE=1 + ;; + x) + EXECUTE=1 + ;; + u) + TESTURLS=1 + ;; + v) + VERBOSE=1 + ;; + w) + CHKWIKI=1 + ;; + b) + CHKABSOLUTE=1 + ;; + e) + CHKEXTERNAL=1 + ;; + l) + CHKRELATIVE=1 + ;; + a) + CHKWIKI=1 + CHKABSOLUTE=1 + CHKEXTERNAL=1 + CHKRELATIVE=1 + ;; + *) + printhelp + exit 1 + ;; + esac +done + +EXITCODE=0 + +if [ -z "${file}" ] ; then + if [ $RECURSIVE -eq 0 ] ; then + printhelp + exit 1 + fi + for f in $(find . 
-type d \( -path ./${GENERATOR_DIR} -o -path ./node_modules \) -prune -o -name "*.md" -print); do
+        checklinks "$f"
+    done
+else
+    if [ $RECURSIVE -eq 1 ] ; then
+        printhelp
+        exit 1
+    fi
+    checklinks "$file"
+fi
+
+exit $EXITCODE
diff --git a/docs/generator/custom/css/netdata.css b/docs/generator/custom/css/netdata.css
new file mode 100644
index 000000000..b3db10883
--- /dev/null
+++ b/docs/generator/custom/css/netdata.css
@@ -0,0 +1,3 @@
+.md-nav__link {
+    white-space: nowrap;
+}
diff --git a/docs/generator/custom/img/favicon.ico b/docs/generator/custom/img/favicon.ico
new file mode 100644
index 000000000..7ed957252
Binary files /dev/null and b/docs/generator/custom/img/favicon.ico differ
diff --git a/docs/generator/custom/javascripts/cookie-consent.js b/docs/generator/custom/javascripts/cookie-consent.js
new file mode 100644
index 000000000..a5c65da49
--- /dev/null
+++ b/docs/generator/custom/javascripts/cookie-consent.js
@@ -0,0 +1,15 @@
+window.addEventListener("load", function(){
+window.cookieconsent.initialise({
+  "palette": {
+    "popup": {
+      "background": "#000"
+    },
+    "button": {
+      "background": "#f1d600"
+    }
+  },
+  "content": {
+    "href": "https://docs.netdata.cloud/docs/privacy-policy/"
+  }
+})});
+
diff --git a/docs/generator/custom/themes/material/partials/footer.html b/docs/generator/custom/themes/material/partials/footer.html
new file mode 100644
index 000000000..fe232b6d5
--- /dev/null
+++ b/docs/generator/custom/themes/material/partials/footer.html
@@ -0,0 +1,54 @@
+{% import "partials/language.html" as lang with context %}
+
diff --git a/docs/generator/requirements.txt b/docs/generator/requirements.txt
new file mode 100644
index 000000000..ac01be7ae
--- /dev/null
+++ b/docs/generator/requirements.txt
@@ -0,0 +1,2 @@
+mkdocs>=1.0.1
+mkdocs-material
diff --git a/docs/generator/runtime.txt b/docs/generator/runtime.txt
new file mode 100644
index 000000000..d70c8f8d8
--- /dev/null
+++ b/docs/generator/runtime.txt
@@ -0,0 +1 @@
+3.6
diff --git a/docs/high-performance-netdata.md b/docs/high-performance-netdata.md
new file mode 100644
index 000000000..a9947d9bc
--- /dev/null
+++ b/docs/high-performance-netdata.md
@@ -0,0 +1,151 @@
+# High performance netdata
+
+If you plan to run a netdata instance exposed publicly on the internet, you will get the most performance out of it by following these rules:
+
+## 1. run behind nginx
+
+The internal web server is optimized to provide the best experience with few clients connected to it. Normally a web browser will make 4-6 concurrent connections to a web server, so that it can send requests in parallel. To best serve a single client, netdata spawns a thread for each connection it receives (so 4-6 threads per connected web browser).
+
+If you plan to have your netdata public on the internet, this strategy wastes resources. It provides a lock-free environment so each thread is autonomous to serve the browser, but it does not scale well. When netdata runs behind nginx, idle connections to netdata can be reused, significantly improving its performance.
+
+In the following nginx configuration we do the following:
+
+- allow nginx to maintain up to 1024 idle connections to netdata (so netdata will have up to 1024 threads waiting for requests)
+
+- allow nginx to compress the responses of netdata (later we will disable gzip compression at netdata)
+
+- block WordPress pingback attacks and allow only GET, HEAD and OPTIONS requests.
+
+```
+upstream backend {
+    server 127.0.0.1:19999;
+    keepalive 1024;
+}
+
+server {
+    listen *:80;
+    server_name my.web.server.name;
+
+    location / {
+        proxy_set_header X-Forwarded-Host $host;
+        proxy_set_header X-Forwarded-Server $host;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_pass http://backend;
+        proxy_http_version 1.1;
+        proxy_pass_request_headers on;
+        proxy_set_header Connection "keep-alive";
+        proxy_store off;
+        gzip on;
+        gzip_proxied any;
+        gzip_types *;
+
+        # Block any HTTP requests other than GET, HEAD, and OPTIONS
+        limit_except GET HEAD OPTIONS {
+            deny all;
+        }
+    }
+
+    # WordPress Pingback Request Denial
+    if ($http_user_agent ~* "WordPress") {
+        return 403;
+    }
+
+}
+```
+
+Then edit `/etc/netdata/netdata.conf` and set these config options:
+
+```
+[global]
+    bind socket to IP = 127.0.0.1
+    access log = none
+    disconnect idle web clients after seconds = 3600
+    enable web responses gzip compression = no
+```
+
+These options:
+
+- `[global].bind socket to IP = 127.0.0.1` makes netdata listen only for requests from localhost (nginx).
+- `[global].access log = none` disables the access.log of netdata. It is not needed since netdata only listens for requests on 127.0.0.1 and thus only nginx can access it. nginx has its own access.log for your records.
+- `[global].disconnect idle web clients after seconds = 3600` will kill inactive web threads after an hour of inactivity.
+- `[global].enable web responses gzip compression = no` disables gzip compression at netdata (nginx will compress the responses).
+
+## 2. increase open files limit (non-systemd)
+
+By default Linux limits open file descriptors per process to 1024. This means that less than half of this number of client connections can be accepted by both nginx and netdata. To increase the limits, create 2 new files:
+
+1. `/etc/security/limits.d/nginx.conf`, with these contents:
+
+ ```
+nginx soft nofile 10000
+nginx hard nofile 30000
+```
+
+2. `/etc/security/limits.d/netdata.conf`, with these contents:
+
+ ```
+netdata soft nofile 10000
+netdata hard nofile 30000
+```
+
+These limits are applied when a new session starts, so to activate them restart both services (note that `sysctl -p` does not reload `limits.d` files):
+
+```sh
+service nginx restart
+service netdata restart
+```
+
+## 2b. increase open files limit (systemd)
+
+Thanks to [@leleobhz](https://github.com/netdata/netdata/issues/655#issue-163932584), this is what you need to raise the limits using systemd:
+
+This is based on https://ma.ttias.be/increase-open-files-limit-in-mariadb-on-centos-7-with-systemd/ and worked as follows:
+
+1. Create the folders in /etc:
+
+ ```
+mkdir -p /etc/systemd/system/netdata.service.d
+mkdir -p /etc/systemd/system/nginx.service.d
+```
+
+2. Create limits.conf in each folder as follows:
+
+ ```
+[Service]
+LimitNOFILE=30000
+```
+
+3. Reload the systemd daemon list and restart the services:
+
+ ```sh
+systemctl daemon-reload
+systemctl restart netdata.service
+systemctl restart nginx.service
+```
+
+You can check the limits with the following commands:
+
+```sh
+cat /proc/$(ps aux | grep "nginx: master process" | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
+cat /proc/$(ps aux | grep "netdata" | head -n1 | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
+```
+
+View of the files:
+
+```sh
+# tree /etc/systemd/system/*service.d
+/etc/systemd/system/netdata.service.d
+└── limits.conf
+/etc/systemd/system/nginx.service.d
+└── limits.conf
+
+0 directories, 2 files
+
+# cat /proc/$(ps aux | grep "nginx: master process" | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
+Max open files            30000                30000                files
+
+# cat /proc/$(ps aux | grep "netdata" | head -n1 | grep -v grep | awk '{print $2}')/limits | grep "Max open files"
+Max open files            30000                30000                files
+
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fhigh-performance-netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/netdata-for-IoT.md b/docs/netdata-for-IoT.md
new file mode 100644
index 000000000..97fba07e6
--- /dev/null
+++ b/docs/netdata-for-IoT.md
@@ -0,0 +1,41 @@
+# Netdata for IoT
+
+![image1](https://cloud.githubusercontent.com/assets/2662304/14252446/11ae13c4-fa90-11e5-9d03-d93a3eb3317a.gif)
+
+> New to netdata? Check its demo: **[https://my-netdata.io/](https://my-netdata.io/)**
+>
+> [![User Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Monitored Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+>
+> [![New Users Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![New Machines Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry) [![Sessions Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+
+---
+
+netdata is a **very efficient** server performance monitoring solution. When running on server hardware, it can collect thousands of system and application metrics **per second** with just 1% CPU utilization of a single core.
Its web server responds to most data requests in about **half a millisecond**, making its web dashboards feel instantaneous!
+
+netdata can also be a very efficient real-time monitoring solution for **IoT devices** (RPIs, routers, media players, wifi access points, industrial controllers and sensors of all kinds). Netdata will generally run everywhere a Linux kernel runs (and it is glibc and [musl-libc](https://www.musl-libc.org/) friendly).
+
+You can use it as a data collection agent (pulling data via its API), embed its charts in other web pages / consoles, or access it directly with your browser to view its dashboard.
+
+The netdata web API already provides **reduce** functions allowing it to report **average** and **max** for any timeframe. It can also respond in many formats, including JSON, JSONP, CSV, HTML. Its API is also a **google charts** provider, so it can directly be used by google sheets, google charts, google widgets.
+
+![sensors](https://cloud.githubusercontent.com/assets/2662304/15339745/8be84540-1c8e-11e6-9e9a-106dea7539b6.gif)
+
+Although netdata has been significantly optimized to lower the CPU and RAM resources it consumes, the plethora of data collection plugins may be inappropriate for weak IoT devices. Please follow the guide on [running netdata in embedded devices](Performance.md).
+
+## Monitoring RPi temperature
+
+The python version of the sensors plugin uses `lm-sensors`. Unfortunately, the temperature readings of the RPi are not supported by `lm-sensors`.
+
+netdata also has a bash version of the sensors plugin that can read RPi temperatures. It is disabled by default to avoid conflicts with the python version.
+
+To enable it, run `sudo edit-config charts.d.conf` and uncomment this line:
+
+```sh
+sensors=force
+```
+
+Then restart netdata. You will get this:
+
+![image](https://user-images.githubusercontent.com/2662304/29658868-23aa65ae-88c5-11e7-9dad-c159600db5cc.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-for-IoT&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
new file mode 100644
index 000000000..642881067
--- /dev/null
+++ b/docs/netdata-security.md
@@ -0,0 +1,183 @@
+# Security design
+
+We have given special attention to all aspects of Netdata, ensuring that everything throughout its operation is as secure as possible. Netdata has been designed with security in mind.
+
+**Table of Contents**
+
+1. [Your data are safe with Netdata](#your-data-are-safe-with-netdata)
+2. [Your systems are safe with Netdata](#your-systems-are-safe-with-netdata)
+3. [Netdata is read-only](#netdata-is-read-only)
+4. [Netdata viewers authentication](#netdata-viewers-authentication)
+   - [Why Netdata should be protected](#why-netdata-should-be-protected)
+   - [Protect Netdata from the internet](#protect-netdata-from-the-internet)
+     - [Expose Netdata only in a private LAN](#expose-netdata-only-in-a-private-lan)
+     - [Use an authenticating web server in proxy mode](#use-an-authenticating-web-server-in-proxy-mode)
+     - [Other methods](#other-methods)
+5. [Registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server)
+
+## Your data are safe with Netdata
+
+Netdata collects raw data from many sources.
For each source, Netdata uses a plugin that connects to the source (or reads the relevant files produced by the source), receives raw data and processes them to calculate the metrics shown on Netdata dashboards.
+
+Even if Netdata plugins connect to your database server, or read your application log file to collect raw data, the product of this data collection process is always **chart metadata and metric values** (summarized data for dashboard visualization). All Netdata plugins (internal to the Netdata daemon, and external ones written in any computer language) convert the raw data they collect into metrics, and only these metrics are stored in Netdata databases, sent to upstream Netdata servers, or archived to backend time-series databases.
+
+> The **raw data** collected by Netdata never leave the host where they are collected. **The only data Netdata exposes are chart metadata and metric values.**
+
+This means that Netdata can safely be used in environments that require the highest level of data isolation (like PCI Level 1).
+
+## Your systems are safe with Netdata
+
+We are very proud that **the Netdata daemon runs as a normal system user, without any special privileges**. This is quite an achievement for a monitoring system that collects all kinds of system and application metrics.
+
+There are a few cases, however, where raw source data are exposed only to processes with escalated privileges. To support these cases, Netdata attempts to minimize and completely isolate the code that runs with escalated privileges.
+
+So, Netdata **plugins**, even those running with escalated capabilities or privileges, perform a **hard coded data collection job**. They do not accept commands from Netdata. The communication is strictly **unidirectional**: from the plugin towards the Netdata daemon. The original application data collected by each plugin do not leave the process in which they are collected, are not saved and are not transferred to the Netdata daemon. The communication from the plugins to the Netdata daemon includes only chart metadata and processed metric values.
+
+Netdata slaves streaming metrics to upstream Netdata servers use exactly the same protocol local plugins use. The raw data collected by the plugins of slave Netdata servers **never leave the host where they are collected**. The only data appearing on the wire are chart metadata and metric values. This communication is also **unidirectional**: slave Netdata servers never accept commands from master Netdata servers.
+
+## Netdata is read-only
+
+Netdata **dashboards are read-only**. Dashboard users can view and examine metrics collected by Netdata, but cannot instruct Netdata to do something other than present the already collected metrics.
+
+Netdata dashboards do not expose sensitive information. Business data of any kind, the kernel version, O/S version, application versions, host IPs, etc. are not stored and are not exposed by Netdata on its dashboards.
+
+## Netdata viewers authentication
+
+Netdata is a monitoring system. It should be protected, the same way you protect all your admin apps. We assume Netdata will be installed privately, for your eyes only.
+
+### Why Netdata should be protected
+
+Viewers will be able to get some information about the system Netdata is running on. This information is everything the dashboard provides.
The dashboard includes a list of the services each system runs (the legends of the charts under the `Systemd Services` section), the applications running (the legends of the charts under the `Applications` section), the disks of the system and their names, the user accounts of the system that are running processes (the `Users` and `User Groups` section of the dashboard), the network interfaces and their names (not the IPs) and detailed information about the performance of the system and its applications.
+
+This information is not sensitive (meaning that it is not your business data), but **it is important for possible attackers**. It will give them clues on what to check and what to try, and in the case of a DDoS against your applications, they will know whether they are succeeding or not.
+
+Also, viewers could use Netdata itself to stress your servers. Although the Netdata daemon runs unprivileged, with the minimum process priority (scheduling priority `idle` - lower than nice 19) and adjusts its OutOfMemory (OOM) score to 1000 (so that it will be first to be killed by the kernel if the system starves for memory), some pressure can be applied on your systems if someone attempts a DDoS against Netdata.
+
+### Protect Netdata from the internet
+
+Netdata is a distributed application. Most likely you will have many installations of it. Since it is distributed and you are expected to jump from server to server, there is very little benefit in adding local authentication on each Netdata.
+
+Until we add a distributed authentication method to Netdata, you have the following options:
+
+#### Expose Netdata only in a private LAN
+
+If your organisation has a private administration and management LAN, you can bind Netdata on this network interface on all your servers. This is done in `netdata.conf` with these settings:
+
+```
+[web]
+    bind to = 10.1.1.1:19999 localhost:19999
+```
+
+You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata will resolve them and use all the IPs (in the above example `localhost` usually resolves to both `127.0.0.1` and `::1`).
+
+**This is the best and the suggested way to protect Netdata**. Your systems **should** have a private administration and management LAN, so that all management tasks are performed without any possibility of them being exposed on the internet.
+
+For cloud-based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers.
+
+For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your Mac). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
+
+---
+
+In Netdata v1.9+ there is also access list support, like this:
+
+```
+[web]
+    bind to = *
+    allow connections from = localhost 10.* 192.168.*
+```
+
+
+#### Use an authenticating web server in proxy mode
+
+Use one web server to provide authentication in front of **all your Netdata servers**.
So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md#netdata-via-nginx), [Apache](Running-behind-apache.md), [lighttpd](Running-behind-lighttpd.md#netdata-via-lighttpd-v14x) and [Caddy](Running-behind-caddy.md#netdata-via-caddy).
+
+To use this method, you should firewall-protect all your Netdata servers, so that only the web server IP will be allowed to access Netdata directly. To do this, run this on each of your servers (or use your firewall manager):
+
+```sh
+PROXY_IP="1.2.3.4"
+iptables -t filter -I INPUT -p tcp --dport 19999 \! -s ${PROXY_IP} -m conntrack --ctstate NEW -j DROP
+```
+_commands to allow direct access to Netdata only from the web server proxy_
+
+The above will prevent anyone except your web server from accessing a Netdata dashboard running on the host.
+
+For Netdata v1.9+ you can also use `netdata.conf`:
+
+```
+[web]
+    allow connections from = localhost 1.2.3.4
+```
+
+Of course you can add more IPs.
+
+For Netdata prior to v1.9, if you want to allow multiple IPs, use this:
+
+```sh
+# space separated list of IPs allowed to access Netdata
+NETDATA_ALLOWED="1.2.3.4 5.6.7.8 9.10.11.12"
+NETDATA_PORT=19999
+
+# create a new filtering chain, or empty an existing one named netdata
+iptables -t filter -N netdata 2>/dev/null || iptables -t filter -F netdata
+for x in ${NETDATA_ALLOWED}
+do
+    # allow this IP
+    iptables -t filter -A netdata -s ${x} -j ACCEPT
+done
+
+# drop all other IPs
+iptables -t filter -A netdata -j DROP
+
+# delete the input chain hook (if it exists)
+iptables -t filter -D INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata 2>/dev/null
+
+# add the input chain hook (again)
+# to send all new netdata connections to our filtering chain
+iptables -t filter -I INPUT -p tcp --dport ${NETDATA_PORT} -m conntrack --ctstate NEW -j netdata
+```
+_script to allow access to Netdata only from a number of hosts_
+
+You can run the above any number of times. Each time it runs it refreshes the list of allowed hosts.
+
+#### Other methods
+
+Of course, there are many more methods you could use to protect Netdata:
+
+- bind Netdata to localhost and use `ssh -L 19998:127.0.0.1:19999 remote.netdata.ip` to forward connections of local port 19998 to remote port 19999. This way you can ssh to a Netdata server and then use `http://127.0.0.1:19998/` on your computer to access the remote Netdata dashboard.
+
+- If you always connect from a static IP, you can use the script given above to allow direct access to your Netdata servers without authentication, from all your static IPs.
+
+- install all your Netdata in **headless data collector** mode, forwarding all metrics in real-time to a master Netdata server, which will be protected with authentication using an nginx server running locally at the master Netdata server. This requires more resources (you will need a bigger master Netdata server), but does not require any firewall changes, since all the slave Netdata servers will not be listening for incoming connections.
+
+## Anonymous Statistics
+
+### Registry or how to not send any information to a third party server
+
+The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](../registry/)).
+
+## Anonymous Statistics
+
+### Registry or how to not send any information to a third party server
+
+The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](../registry/) ). Please be aware that if you use that public registry, you submit the following information to a third party server:
+
+- The URL where you open the web UI in the browser (via the HTTP request referer)
+- The hostnames of the Netdata servers
+
+If sending this information to the central Netdata registry violates your security policies, you can configure Netdata to [run your own registry](../registry/#run-your-own-registry), along the lines of the sketch below.
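+
+A minimal sketch of the idea, in `netdata.conf` (the hostname `registry.example.com` is an assumption; the registry documentation linked above is the authoritative reference):
+
+```
+# on the single Netdata server that will act as your registry
+[registry]
+    enabled = yes
+    registry to announce = http://registry.example.com:19999
+
+# on all your other Netdata servers
+[registry]
+    enabled = no
+    registry to announce = http://registry.example.com:19999
+```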
+
+### Opt out of anonymous statistics
+
+Starting with v1.12, Netdata also collects [anonymous statistics](anonymous-statistics.md) on certain events for:
+
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
+
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
+
+To opt out of sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`).
+
+## Netdata directories
+
+path|owner|permissions| netdata |comments|
+:---|:----|:----------|:--------|:-------|
+`/etc/netdata`|user `root`<br/>group `netdata`|dirs `0755`<br/>files `0640`|reads|**netdata config files**<br/>may contain sensitive information, so group `netdata` is allowed to read them.
+`/usr/libexec/netdata`|user `root`<br/>group `root`|executable by anyone<br/>dirs `0755`<br/>files `0644` or `0755`|executes|**netdata plugins**<br/>permissions depend on the file - not all of them should have the executable flag.<br/>there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`.
+`/usr/share/netdata`|user `root`<br/>group `netdata`|readable by anyone<br/>dirs `0755`<br/>files `0644`|reads and sends over the network|**Netdata web static files**<br/>these files are sent over the network to anyone that has access to the Netdata web server. Netdata checks the ownership of these files (using settings at the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. Netdata also refuses to serve URLs with `..` in their name.
+`/var/cache/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata ephemeral database files**<br/>Netdata stores its ephemeral real-time database here.
+`/var/lib/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata permanent database files**<br/>Netdata stores here the registry data, health alarm log db, etc.
+`/var/log/netdata`|user `netdata`<br/>group `root`|dirs `0755`<br/>files `0644`|writes, creates|**Netdata log files**<br/>all the Netdata applications log their errors and other informational messages to files in this directory. These files should be log-rotated.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fnetdata-security&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/privacy-policy.md b/docs/privacy-policy.md
new file mode 100644
index 000000000..af50b8851
--- /dev/null
+++ b/docs/privacy-policy.md
@@ -0,0 +1,115 @@
+# Privacy Policy
+
+## 1. Preamble
+
+This Privacy Policy explains the collection, use, processing, transferring and disclosure of personal information by Netdata, Inc. (“ND” or “Netdata”), a Delaware Corporation.
+
+This Privacy Policy is incorporated into and made part of the Netdata Master Terms of Use (“Master Terms”) located [here](terms-of-use.md).
+
+Unless otherwise noted on a particular website or service hosted by Netdata, this Privacy Policy applies to your use of all websites that Netdata operates. These include https://my-netdata.io and https://netdata.cloud, together with all other subdomains thereof (collectively, the “Websites”). This Privacy Policy also applies to all products, information, and services provided through the Websites, including without limitation the ND agent, the ND registry, the ND hub and the ND cloud website (together with the Websites, the “Services”).
+
+In addition, supplemental Privacy Policy terms (“Supplemental Privacy Policy Terms”) may apply to a particular Service. All such Supplemental Privacy Policy Terms will be accessible for you to read either within, or through your use of, that particular Service.
+
+By accessing or using any of the Services, you are accepting and agreeing to the practices described in this Privacy Policy.
+
+## 2. Our Principles
+
+Netdata has designed this policy to be consistent with the following principles:
+
+Privacy policies should be human readable and easy to find.
+Data collection, storage, and processing should be simplified as much as possible to enhance security, ensure consistency, and make the practices easy for users to understand.
+Data practices should always meet the reasonable expectations of users.
+
+## 3. Personal Information ND Collects and How it is Used
+
+As used in this policy, “personal information” means information that would allow someone to identify you, including your name, email address, IP address, or other information from which someone could deduce your identity.
+
+ND collects and uses personal information in the following ways:
+
+Website and Analytics: When you visit our Websites and use our Services, ND collects some information about your activities through tools such as Google Analytics. The type of information that we collect focuses on general information such as country or city where you are located, pages visited, time spent on pages, heat-map of visitors’ activity on the site, information about the browser you are using, etc. ND collects and uses this information pursuant to our legitimate interest in enhancing the security and utility of our Services. The information we gather and process is used in the aggregate to spot trends without deliberately identifying individuals.
+
+Note that you can learn about Google’s practices in connection with its analytics services, and how to opt out of them, by downloading the Google Analytics opt-out browser add-on, available at https://tools.google.com/dlpage/gaoptout.
+
+Information from Cookies: We and our service providers (for example, Google Analytics as described above) may collect information using cookies or similar technologies for the purposes described above and below. Cookies are pieces of information that are stored by your browser on the hard drive or memory of your computer or other Internet access device. Cookies may enable us to personalize your experience on the Services, maintain a persistent session, passively collect demographic information about your computer, and monitor advertisements and other activities. The Websites may use different kinds of cookies and other types of local storage (such as browser-based or plugin-based local storage).
+
+ND Registry: The global registry, together with certain browser features, allows Netdata to provide unified cross-server dashboards, via the `my-netdata` menu. The menu lists the Netdata servers you have visited. For example, when you jump from server to server using the `my-netdata` menu, several session settings (like the currently viewed charts, the current zoom and pan operations on the charts, etc.) are propagated to the new server, so that the new dashboard will come with exactly the same view. The global registry keeps track of 3 entities:
+
+1. **machines**: i.e. the Netdata installations (a random GUID generated by each Netdata the first time it starts; we call this **machine_guid**). For each Netdata installation (each `machine_guid`) the registry keeps track of the different URLs through which it is accessed.
+
+2. **persons**: i.e. the web browsers accessing the Netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**). For each person, the registry keeps track of the Netdata installations it has accessed and their URLs.
+
+3. **URLs** of Netdata installations (as seen by the web browsers). For each URL, the registry keeps the URL and nothing more. Each URL is linked to *persons* and *machines*. The only way to find a URL is to know its **machine_guid** or to have a **person_guid** it is linked to.
+
+If sending this information is against your policies, you can [run your own registry](../registry/#run-your-own-registry).
+Note that ND versions with the 'Sign in' feature of the ND Cloud do not use the global registry.
+
+ND Cloud: When you sign up to obtain a user account via the 'Sign in' link on the ND agent user interface, ND is granted access to personal information in the user profile of the authentication provider you choose (e.g. GitHub or Google). ND collects and uses this personal information pursuant to its legitimate interest in establishing and maintaining your account and providing you with the features we provide to Registered Users. We may use your email address to contact you regarding changes to this policy or other applicable policies. The login name or email address of your profile may be used to attribute you in connection with any content you submit to any Service.
+
+Anonymous Usage Statistics: From Netdata v1.12 and above, anonymous usage information is collected by default on certain events of the ND daemon and sent to Google Analytics.
+Every time the daemon is started or stopped, and every time a fatal condition is encountered, Netdata collects system information and sends it to GA via an HTTP call. The information collected for all events is:
+ - Netdata version
+ - OS name, version, id, id_like
+ - Kernel name, version, architecture
+ - Virtualization technology
+ - Containerization technology
+Furthermore, the FATAL event sends the Netdata process & thread info, along with the file, function and line of the fatal error.
+
+The statistics calculated from this information are used for:
+
+1. **Quality assurance**, to help us understand if Netdata behaves as expected and help us identify repeating issues for certain distributions or environments.
+
+2. **Usage statistics**, to help us focus on the parts of Netdata that are used the most, or help us identify the extent to which our development decisions influence the community.
+
+To opt out of sending anonymous statistics, you can create a file called `.opt-out-from-anonymous-statistics` under the user configuration directory (usually `/etc/netdata`).
+
+Emails and Newsletters: When you sign up to receive updates from Netdata or otherwise subscribe to one of our mailing lists, you will be asked to provide some personal information. ND collects and uses this personal information pursuant to its legitimate interest in providing news and updates to, and collaborating with, its supporters and volunteers.
+
+Email Analytics: When you receive communications from ND after signing up for the ND newsletter, campaign updates, or other ongoing email communications from ND, we may use analytics to track whether you open the mail, click on the links, and otherwise interact with what we send. You may opt out of this tracking by choosing to get plain-text emails from ND. ND collects and uses this personal information pursuant to its legitimate interest in understanding the interests of its community of supporters and volunteers in order to provide more relevant news and updates.
+
+Other Voluntarily Provided Information: When you provide feedback to Netdata, sign a petition distributed by ND, or otherwise submit personal information to Netdata, ND collects and uses this personal information pursuant to its legitimate interest in better understanding our community of supporters and volunteers and in furtherance of the particular program or activity to which you provided feedback or other input.
+
+## 4. Retention of Personal Information
+
+The majority of the personal information collected and used as explained in Section 3 above is aggregated and stored in a central database provided by a third party service provider. ND aggregates this data pursuant to its legitimate interest in having information stored in a single location to minimize complexity, increase consistency in internal practices, better understand its community of supporters and volunteers, and enhance the security of the data.
+
+## 5. Access to Your Personal Information
+
+You are generally entitled to access personal information that Netdata holds and to have inaccurate data corrected or removed to the extent ND still maintains it. In certain circumstances, you also may have the right to object for legitimate reasons to the processing or transfer of personal information. If you wish to exercise any of these rights, please write to legal@netdata.cloud explaining your request.
+
+## 6. Disclosure of Your Personal Information
+
+ND does not disclose personal information to third parties except as specified elsewhere in this policy and in the following instances:
+
+We may disclose your personal information to third parties in a good faith belief that such disclosure is reasonably necessary to (a) take action regarding suspected illegal activities; (b) enforce or apply our Master Terms and this Privacy Policy; (c) enforce our Charter, including the Code of Conduct and policies contained and incorporated therein, or (d) comply with legal process, such as a search warrant, subpoena, statute, or court order.
+
+## 7. Security of Your Personal Information
+
+Netdata has implemented reasonable physical, technical, and organizational security measures for personal information that Netdata processes against accidental or unlawful destruction, or accidental loss, alteration, unauthorized disclosure or access, in compliance with applicable law. However, no website can fully eliminate security risks. If any data breach occurs, we will post a reasonably prominent notice to the Websites and comply with all other applicable data privacy requirements including, when required, personal notice to you if you have provided and we have maintained an email address for you.
+
+The ND Cloud Services have security risks in addition to those described above. Among other things, they are vulnerable to DNS attacks, and using any ND Cloud Service may increase the risk of phishing.
+
+## 8. Children
+
+The Services are not directed at children under the age of 13. Consistent with the U.S. federal Children’s Online Privacy Protection Act of 1998 (COPPA), we will never knowingly request personal information from anyone under the age of 13 without requiring parental consent. Our Master Terms specifically prohibit anyone using our Services from submitting any personally identifiable information about persons under 13 years of age. Any person who provides their personal information to ND through the Services represents that they are 13 years of age or older.
+
+## 9. Third Party Service Providers
+
+Netdata uses many third party service providers in connection with the Services, including website hosting services, database management, credit card processing, and many more. Some of these service providers may place session cookies on your computer, and they may collect and store your personal information on our behalf in accordance with the data practices and purposes explained above in Section 3.
+
+## 10. Third Party Sites
+
+The Services may provide links to a wide variety of third party websites. You should consult the respective privacy policies of these third-party websites. This Privacy Policy does not apply to, and we cannot control the activities of, such other websites.
+
+## 11. Transferring Data to Other Countries
+
+If you are accessing or using the Services in regions with laws governing data collection, processing, transfer and use, please note that when we use and share your data as specified in this policy, we may transfer your information to recipients in countries other than the country in which the information was originally collected. Those countries may not have the same data protection laws as the country in which you initially provided the information.
+
+Transfers of data from the European Union to the United States or outside the European Union will be made on the grounds of a certification to the E.U./U.S.
Privacy Shield regime and/or a data transfer agreement based on the Standard Contractual Clauses approved of by the European Commission respectively, consistent with applicable data privacy requirements. + +## 12. Changes to this Privacy Policy + +We may occasionally update this Privacy Policy. When we do, we will provide you with notice of such update through (at a minimum) a reasonably prominent notice on the Websites and Services, and will revise the Effective Date below. We encourage you to periodically review this Privacy Policy to stay informed about how we are protecting, using, processing and transferring the personal information we collect. + +Effective Date: 8 January 2019. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fprivacy-policy&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/terms-of-use.md b/docs/terms-of-use.md new file mode 100644 index 000000000..5565f6056 --- /dev/null +++ b/docs/terms-of-use.md @@ -0,0 +1,161 @@ +# Terms of Use + +Netdata Master Terms of Use +Effective as of 25 May 2018 + +## 1. General Information Regarding These Terms of Use + +Master terms: Welcome, and thank you for your interest in Netdata (“Netdata, Inc.” “ND,” “we,” “our,” or “us”). Unless otherwise noted on a particular site or service, these master terms of use (“Master Terms”) apply to your use of all of the websites that Netdata Corporation operates. These include https://my-netdata.io and https://netdata.cloud, together with all other subdomains thereof, (collectively, the “Websites”). The Master Terms also apply to all products, information, and services provided through the Websites, such as the NDID Login Service. + +Additional terms: In addition to the Master Terms, your use of any Services may also be subject to specific terms applicable to a particular Service (“Additional Terms”). If there is any conflict between the Additional Terms and the Master Terms, then the Additional Terms apply in relation to the relevant Service. + +Collectively, the Terms: The Master Terms, together with any Additional Terms, form a binding legal agreement between you and Netdata in relation to your use of the Services. Collectively, this legal agreement is referred to below as the “Terms.” + +Human-readable summary of Sec 1: These terms, together with any special terms for particular websites, create a contract between you and Netdata. The contract governs your use of all websites operated by Netdata, unless a particular website indicates otherwise. These human-readable summaries of each section are not part of the contract, but are intended to help you understand its terms. + +## 2. Your Agreement to the Terms + +BY ACCESSING OR USING ANY OF THE SERVICES, YOU ACKNOWLEDGE THAT YOU HAVE READ, UNDERSTOOD, AND AGREED TO BE BOUND BY THE TERMS. By accessing or using any Services you also represent that you have the legal authority to accept the Terms on behalf of yourself and any party you represent in connection with your use of any Services. If you do not agree to the Terms, you are not authorized to use any Services. 
If you are an individual who is entering into these Terms on behalf of an entity, you represent and warrant that you have the power to bind that entity, and you hereby agree on that entity’s behalf to be bound by these Terms, with the terms “you,” and “your” applying to you, that entity, and other users accessing the Services on behalf of that entity. + +Human-readable summary of Sec 2: Please read these terms and only use our sites and services if you agree to them. + +## 3. Changes to the Terms + +From time to time, Netdata may change, remove, or add to the Terms, and reserves the right to do so in its discretion. In that case, we will post updated Terms and indicate the date of revision. If we feel the modifications are material, we will make reasonable efforts to post a prominent notice on the relevant Website(s) and notify those of you with a current NDID Login Service account via email. All new and/or revised Terms take effect immediately and apply to your use of the Services from that date on, except that material changes will take effect 30 days after the change is made and identified as material. Your continued use of any Services after new and/or revised Terms are effective indicates that you have read, understood, and agreed to those Terms. + +Human-readable summary of Sec 3: These terms may change. When the changes are important, we will put a notice on the website. If you continue to use the sites after the changes are made, you agree to the changes. + +## 4. No Legal Advice + +Netdata is not a law firm, does not provide legal advice, and is not a substitute for a law firm. Sending us an email or using any of the Services, including the licenses, public domain tools, and choosers, does not constitute legal advice or create an attorney-client relationship. + +Human-readable summary of Sec 4: Some of us are lawyers, but we aren’t your lawyer. Please consult your own attorney if you need legal advice. + +## 5. Content Available through the Services + +Provided as-is: You acknowledge that Netdata does not make any representations or warranties about the material, data, and information, such as data files, text, computer software, code, music, audio files or other sounds, photographs, videos, or other images (collectively, the “Content”) which you may have access to as part of, or through your use of, the Services. Under no circumstances is Netdata liable in any way for any Content, including, but not limited to: any infringing Content, any errors or omissions in Content, or for any loss or damage of any kind incurred as a result of the use of any Content posted, transmitted, linked from, or otherwise accessible through or made available via the Services. You understand that by using the Services, you may be exposed to Content that is offensive, indecent, or objectionable. + +You agree that you are solely responsible for your reuse of Content made available through the Services, including providing proper attribution. You should review the terms of the applicable license before you use the Content so that you know what you can and cannot do. + +Licensing: ND-Owned Content: Other than the text of Netdata licenses, ND licenses, and other legal tools and the text of the deeds for all legal tools, Netdata trademarks (subject to the Trademark Policy), and the software code, all Content on the Websites is licensed under the Creative Commons Attribution 4.0 license, unless otherwise marked. See the ND Policies page for more information. 
+
+ND-Owned Code: All of ND’s software code is free software; please check our code repository for the specific license on software you want to reuse.
+
+Search Tools: On some of its Websites, Netdata provides website search tools, including ND Search, which return Content based on any information our search tools are able to locate and interpret. Those search tools may return Content that is not ND licensed, and you should independently verify the terms of the license attached to any Content you intend to use.
+
+Human-readable summary of Sec 5: We try our best to have useful information on our sites, but we cannot promise that everything is accurate or appropriate for your situation. Content on the site is licensed under CC BY 4.0 unless it says it is available under different terms. If you find content through a link on our websites, be sure to check the license terms before using it.
+
+## 6. Content Supplied by You
+
+Your responsibility: You represent, warrant, and agree that no Content posted or otherwise shared by you on or through any of the Services (“Your Content”) violates or infringes upon the rights of any third party, including copyright, trademark, privacy, publicity, or other personal or proprietary rights, breaches or conflicts with any obligation, such as a confidentiality obligation, or contains libelous, defamatory, or otherwise unlawful material.
+
+Licensing Your Content: You retain any copyright that you may have in Your Content. You hereby agree that Your Content: (a) is hereby licensed under the CC Attribution 4.0 License and may be used under the terms of that license or any later version of a CC Attribution License, or (b) is in the public domain (such as Content that is not copyrightable or Content you make available under CC0), or (c) if not owned by you, (i) is available under a CC Attribution 4.0 License or (ii) is a media file that is available under any CC license or that you are authorized by law to post or share through any of the Services, such as under the fair use doctrine, and that is prominently marked as being subject to third party copyright. All of Your Content must be appropriately marked with licensing (or other permission status such as fair use) and attribution information.
+
+Removal: Netdata may, but is not obligated to, review Your Content and may delete or remove Your Content (without notice) from any of the Services in its sole discretion. Removal of any of Your Content from the Services (by you or Netdata) does not impact any rights you granted in Your Content under the terms of a Netdata license.
+
+Human-readable summary of Sec 6: We do not take any ownership of your content when you post it on our sites. If you post content you own, you agree it can be used under the terms of CC BY 4.0 or any future version of that license. If you do not own the content, then you should not post it unless it is in the public domain or licensed CC BY 4.0, except that you may also post pictures and videos if you are authorized to use them under law (e.g. fair use) or if they are available under any CC license. You must note that information on the file when you upload it. You are responsible for any content you upload to our sites.
+
+## 7. Prohibited Conduct
+
+You agree not to engage in any of the following activities:
+
+### 1. Violating laws and rights:
+
+You may not (a) use any Service for any illegal purpose or in violation of any local, state, national, or international laws, or (b) violate or encourage others to violate any right of, or obligation to, a third party, including by infringing, misappropriating, or violating intellectual property, confidentiality, or privacy rights.
+
+### 2. Solicitation:
+
+You may not use the Services or any information provided through the Services for the transmission of advertising or promotional materials, including junk mail, spam, chain letters, pyramid schemes, or any other form of unsolicited or unwelcome solicitation.
+
+### 3. Disruption:
+
+You may not use the Services in any manner that could disable, overburden, damage, or impair the Services, or interfere with any other party’s use and enjoyment of the Services, including by (a) uploading or otherwise disseminating any virus, adware, spyware, worm or other malicious code, or (b) interfering with or disrupting any network, equipment, or server connected to or used to provide any of the Services, or violating any regulation, policy, or procedure of any network, equipment, or server.
+
+### 4. Harming others:
+
+You may not post or transmit Content on or through the Services that is harmful, offensive, obscene, abusive, invasive of privacy, defamatory, hateful or otherwise discriminatory, false or misleading, or incites an illegal act;
+You may not intimidate or harass another through the Services; and you may not post or transmit any personally identifiable information about persons under 13 years of age on or through the Services.
+
+### 5. Impersonation or unauthorized access:
+
+You may not impersonate another person or entity, or misrepresent your affiliation with a person or entity when using the Services;
+You may not use or attempt to use another’s account or personal information without authorization; and
+You may not attempt to gain unauthorized access to the Services, or the computer systems or networks connected to the Services, through hacking, password mining or any other means.
+
+Human-readable summary of Sec 7: Play nice. Be yourself. Don’t break the law or be disruptive.
+
+## 8. DISCLAIMER OF WARRANTIES
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, NETDATA OFFERS THE SERVICES (INCLUDING ALL CONTENT AVAILABLE ON OR THROUGH THE SERVICES) AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE SERVICES, EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, INCLUDING WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. NETDATA DOES NOT WARRANT THAT THE FUNCTIONS OF THE SERVICES WILL BE UNINTERRUPTED OR ERROR-FREE, THAT CONTENT MADE AVAILABLE ON OR THROUGH THE SERVICES WILL BE ERROR-FREE, THAT DEFECTS WILL BE CORRECTED, OR THAT ANY SERVERS USED BY ND ARE FREE OF VIRUSES OR OTHER HARMFUL COMPONENTS. NETDATA DOES NOT WARRANT OR MAKE ANY REPRESENTATION REGARDING USE OF THE CONTENT AVAILABLE THROUGH THE SERVICES IN TERMS OF ACCURACY, RELIABILITY, OR OTHERWISE.
+
+Human-readable summary of Sec 8: ND does not make any guarantees about the sites, services, or content available on the sites.
+
+## 9. LIMITATION OF LIABILITY
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL NETDATA BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY INCIDENTAL, DIRECT, INDIRECT, PUNITIVE, ACTUAL, CONSEQUENTIAL, SPECIAL, EXEMPLARY, OR OTHER DAMAGES, INCLUDING WITHOUT LIMITATION, LOSS OF REVENUE OR INCOME, LOST PROFITS, PAIN AND SUFFERING, EMOTIONAL DISTRESS, COST OF SUBSTITUTE GOODS OR SERVICES, OR SIMILAR DAMAGES SUFFERED OR INCURRED BY YOU OR ANY THIRD PARTY THAT ARISE IN CONNECTION WITH THE SERVICES (OR THE TERMINATION THEREOF FOR ANY REASON), EVEN IF NETDATA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, NETDATA IS NOT RESPONSIBLE OR LIABLE WHATSOEVER IN ANY MANNER FOR ANY CONTENT POSTED ON OR AVAILABLE THROUGH THE SERVICES (INCLUDING CLAIMS OF INFRINGEMENT RELATING TO THAT CONTENT), FOR YOUR USE OF THE SERVICES, OR FOR THE CONDUCT OF THIRD PARTIES ON OR THROUGH THE SERVICES.
+
+Certain jurisdictions do not permit the exclusion of certain warranties or limitation of liability for incidental or consequential damages, which means that some of the above limitations may not apply to you. IN THESE JURISDICTIONS, THE FOREGOING EXCLUSIONS AND LIMITATIONS WILL BE ENFORCED TO THE GREATEST EXTENT PERMITTED BY APPLICABLE LAW.
+
+Human-readable summary of Sec 9: ND is not responsible for the content on the sites, your use of our services, or for the conduct of others on our sites.
+
+## 10. Indemnification
+
+To the extent authorized by law, you agree to indemnify and hold harmless Netdata, its employees, officers, directors, affiliates, and agents from and against any and all claims, losses, expenses, damages, and costs, including reasonable attorneys’ fees, resulting directly or indirectly from or arising out of (a) your violation of the Terms, (b) your use of any of the Services, and/or (c) the Content you make available on any of the Services.
+
+Human-readable summary of Sec 10: If something happens because you violate these terms, because of your use of the services, or because of the content you post on the sites, you agree to repay ND for the damage it causes.
+
+## 11. Privacy Policy
+
+Netdata is committed to responsibly handling the information and data we collect through our Services in compliance with our Privacy Policy, which is incorporated by reference into these Master Terms. Please review the Privacy Policy so you are aware of how we collect and use your personal information.
+
+Human-readable summary of Sec 11: Please read our Privacy Policy. It is part of these terms, too.
+
+## 12. Trademark Policy
+
+ND’s name, logos, icons, and other trademarks may only be used in accordance with our Trademark Policy, which is incorporated by reference into these Master Terms. Please review the Trademark Policy so you understand how ND’s trademarks may be used.
+
+Human-readable summary of Sec 12: Please read our Trademark Policy. It is part of these terms, too.
+
+## 13. Copyright Complaints
+
+Netdata respects copyright, and we prohibit users of the Services from submitting, uploading, posting, or otherwise transmitting any Content on the Services that violates another person’s proprietary rights.
+
+To report allegedly infringing Content hosted on a website owned or controlled by ND, send a Notice of Infringing Materials to info@netdata.cloud.
+
+Please note that Netdata does not host the Content made available through ND Search. You should contact the website or service hosting the Content to have it removed.
+
+Human-readable summary of Sec 13: Please let us know if you find infringing content on our websites.
+
+## 14. Termination
+
+By Netdata: Netdata may modify, suspend, or terminate the operation of, or access to, all or any portion of the Services at any time for any reason. Additionally, your individual access to, and use of, the Services may be terminated by Netdata at any time and for any reason.
+
+By you: If you wish to terminate this agreement, you may immediately stop accessing or using the Services at any time.
+
+Automatic upon breach: Your right to access and use the Services (including use of your ND Login Service account) terminates automatically upon your breach of any of the Terms. For the avoidance of doubt, termination of the Terms does not require you to remove or delete any reference to previously-applied ND legal tools from your own Content.
+
+Survival: The disclaimer of warranties, the limitation of liability, and the jurisdiction and applicable law provisions will survive any termination. The license grants applicable to Your Content are not impacted by the termination of the Terms and shall continue in effect subject to the terms of the applicable license. Your warranties and indemnification obligations will survive for one year after termination.
+
+Human-readable summary of Sec 14: If you violate these terms, you may no longer use our sites.
+
+## 15. Miscellaneous Terms
+
+Choice of law: The Terms are governed by and construed in accordance with the laws of the State of Delaware in the United States, not including its choice of law rules.
+
+Dispute resolution: The parties agree that any disputes between Netdata and you concerning these Terms, and/or any of the Services, may only be brought in a federal or state court of competent jurisdiction sitting in the State of Delaware, and you hereby consent to the personal jurisdiction and venue of such court.
+
+If you are an authorized agent of a government or intergovernmental entity using the Services in your official capacity, including an authorized agent of the federal, state, or local government in the United States, and you are legally restricted from accepting the controlling law, jurisdiction, or venue clauses above, then those clauses do not apply to you. For any such U.S. federal government entities, these Terms and any action related thereto will be governed by the laws of the United States of America (without reference to conflict of laws) and, in the absence of federal law and to the extent permitted under federal law, the laws of the State of Delaware (excluding its choice of law rules).
+
+No waiver: Either party’s failure to insist on or enforce strict performance of any of the Terms will not be construed as a waiver of any provision or right.
+
+Severability: If any part of the Terms is held to be invalid or unenforceable by any law or regulation or final determination of a competent court or tribunal, that provision will be deemed severable and will not affect the validity and enforceability of the remaining provisions.
+
+No agency relationship: The parties agree that no joint venture, partnership, employment, or agency relationship exists between you and Netdata as a result of the Terms or from your use of any of the Services.
+
+Integration: These Master Terms and any applicable Additional Terms constitute the entire agreement between you and Netdata relating to this subject matter and supersede any and all prior communications and/or agreements between you and Netdata relating to access and use of the Services.
+
+Human-readable summary of Sec 15: If there is a lawsuit arising from these terms, it should be in Delaware and governed by Delaware law. We are glad you use our sites, but this agreement does not mean we are partners.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fterms-of-use&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/why-netdata/1s-granularity.md b/docs/why-netdata/1s-granularity.md
new file mode 100644
index 000000000..089854543
--- /dev/null
+++ b/docs/why-netdata/1s-granularity.md
@@ -0,0 +1,53 @@
+# 1s granularity
+
+High resolution metrics are required to effectively monitor and troubleshoot systems and applications.
+
+## Why?
+
+- The world is going real-time. Today, customer experience is significantly affected by response time, so SLAs are tighter than ever before. It is just not practical to monitor a 2-second SLA with 10-second metrics.
+
+- IT goes virtual. Unlike real hardware, virtual environments are neither linear nor predictable. You cannot expect resources to be available when your applications need them. They will eventually be, but not exactly at the time they are needed. The latency of virtual environments is affected by many factors, most of which are outside our control, like: the maintenance policy of the hosting provider, the workload of third-party virtual machines running on the same physical servers combined with the resource allocation and throttling policy among virtual machines, the provisioning system of the hosting provider, etc.
+
+## What do others do?
+
+So, why don't most monitoring platforms and monitoring SaaS providers offer high resolution metrics?
+
+They want to, but they can't, at least not massively.
+
+The reasons lie in their design decisions:
+
+1. Time-series databases (prometheus, graphite, opentsdb, influxdb, etc) centralize all the metrics. At scale, these databases can easily become the bottleneck of the whole infrastructure.
+
+2. SaaS providers base their business models on centralizing all the metrics. On top of the time-series database bottleneck, they also have increased bandwidth costs. So, massively supporting high resolution metrics would destroy their business model.
+
+Of course, the world solved this kind of scaling problem a couple of decades ago: instead of scaling up, scale out horizontally. That is, instead of investing in bigger and bigger central components, decentralize the application so that it can scale by adding more, smaller nodes to it.
+
+There have been many attempts to fix this problem for monitoring. But so far, all solutions have required centralization of metrics, which can only scale up. So, although the problem is somewhat managed, it remains the key problem of all monitoring platforms and one of the key reasons for increased monitoring costs.
+
+Another important factor is how resource-efficient data collection can be when running per second. Most solutions fail to do it properly. Their data collection agents consume significant system resources when running "per second", influencing the monitored systems and applications to a great degree.
+
+Finally, per second data collection is a lot harder. Busy virtual environments have [a constant latency of about 100ms, spread randomly to all data sources](https://docs.google.com/presentation/d/18C8bCTbtgKDWqPa57GXIjB2PbjjpjsUNkLtZEz6YK8s/edit#slide=id.g422e696d87_0_57).
+If data collection is not implemented properly, this latency introduces a random error of +/- 10%, which is quite significant for a monitoring system.
+
+So, the monitoring industry fails to massively provide high resolution metrics, mainly for 3 reasons:
+
+1. Centralization of metrics makes monitoring cost-inefficient at that rate.
+2. Data collection needs optimization, otherwise it will significantly affect the monitored systems.
+3. Data collection is a lot harder, especially on busy virtual environments.
+
+## What does netdata do differently?
+
+Netdata decentralizes monitoring completely. Each Netdata node is autonomous. It collects metrics locally, it stores them locally, it runs checks against them to trigger alarms locally, and provides an API for the dashboards to visualize them. This allows Netdata to scale to infinity.
+
+Of course, Netdata can centralize metrics when needed. For example, it is not practical to keep metrics locally on ephemeral nodes. For these cases, Netdata streams the metrics in real-time, from the ephemeral nodes to one or more non-ephemeral nodes nearby. This centralization is again distributed. On a large infrastructure, there may be many centralization points.
+
+To eliminate the error introduced by data collection latencies on busy virtual environments, Netdata interpolates collected metrics. It does this using microsecond timings, per data source, offering measurements with an error rate of 0.0001%. When running [in debug mode, netdata calculates this error rate](https://github.com/netdata/netdata/blob/36199f449852f8077ea915a3a14a33fa2aff6d85/database/rrdset.c#L1070-L1099) for every point collected, ensuring that the database works with acceptable accuracy. A toy illustration of the idea is shown below.
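+
+This is not Netdata's actual code (that lives in `database/rrdset.c`, linked above) - just a sketch of linear interpolation to a fixed time boundary, with made-up sample values:
+
+```sh
+# a counter is sampled at t=0.0000s (value 10000) and again at t=1.0034s
+# (value 11034, because collection was ~3.4ms late); the value stored for
+# the exact 1.0s boundary is interpolated between the two real samples
+awk 'BEGIN {
+    t_prev = 0.0000; v_prev = 10000;   # previous collection
+    t_now  = 1.0034; v_now  = 11034;   # current collection (slightly late)
+    t_step = 1.0;                      # the boundary we want to store
+    v_step = v_prev + (v_now - v_prev) * (t_step - t_prev) / (t_now - t_prev);
+    printf "value stored for t=%.1fs: %.2f\n", t_step, v_step;  # ~11030.50
+}'
+```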
+
+Finally, Netdata is really fast. Optimization is a core product feature. On modern hardware, Netdata can collect metrics at a rate above 1M metrics per second per core (this includes everything: parsing data sources, interpolating data, storing data in the time-series database, etc.). So, for a few thousand metrics per second per node, Netdata needs negligible CPU resources (just 1-2% of a single core).
+
+Netdata has been designed to:
+
+- Solve the centralization problem of monitoring
+- Replace the console for performance troubleshooting.
+
+So, for Netdata, 1s granularity is easy - the natural outcome...
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2F1s-granularity&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/why-netdata/README.md b/docs/why-netdata/README.md
new file mode 100644
index 000000000..df8c0d02b
--- /dev/null
+++ b/docs/why-netdata/README.md
@@ -0,0 +1,30 @@
+# Why Netdata
+
+> Any performance monitoring solution that does not go down to per second
+> collection and visualization of the data is useless.
+> It will make you happy to have it, but it will not help you more than that.
+
+Netdata is built around 4 principles:
+
+1. **[Per second data collection for all metrics.](1s-granularity.md)**
+
+    *It is impossible to monitor a 2 second SLA with 10 second metrics.*
+
+2. **[Collect and visualize all the metrics from all possible sources.](unlimited-metrics.md)**
+
+    *To troubleshoot slowdowns, we need all the available metrics. The console should not have more metrics than the monitoring solution.*
+
+3. **[Meaningful presentation, optimized for visual anomaly detection.](meaningful-presentation.md)**
+
+    *Metrics are a lot more than name-value pairs over time. The monitoring tool should know all the metrics. Users should not!*
+
+4. **[Immediate results, just install and use.](immediate-results.md)**
+
+    *Most of our infrastructure is standardized. There is no point in configuring everything metric by metric.*
+
+Unlike other monitoring solutions that focus on metrics visualization,
+Netdata helps us troubleshoot slowdowns without touching the console.
+
+So, everything is a bit different.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2FWhy-Netdata&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/why-netdata/immediate-results.md b/docs/why-netdata/immediate-results.md
new file mode 100644
index 000000000..9afe4afdc
--- /dev/null
+++ b/docs/why-netdata/immediate-results.md
@@ -0,0 +1,41 @@
+# Immediate results
+
+Most of our infrastructure is based on standardized systems and applications.
+
+It is a tremendous waste of time and effort, on a global scale, to require all users to configure their infrastructure dashboards and alarms metric by metric.
+
+## Why?
+
+Most of the existing monitoring solutions focus on providing a platform "for building your monitoring". So, they provide the tools to collect metrics, store them, visualize them, check them and query them. And we are all expected to go through this process.
+
+However, most of our infrastructure is standardized. We run well known Linux distributions, the same kernel, the same database, the same web server, etc.
+
+So, why can't we have a monitoring system that can be installed and instantly provide feature-rich dashboards and alarms about everything we use? Is there any reason you would like to monitor your web server differently than I do?
+
+What a waste of time and money! Hundreds of thousands of people doing the same thing over and over again, trying to understand what the metrics are, how to visualize them, how to configure alarms for them and how to query them when issues arise.
+
+## What do others do?
+
+Open-source solutions rely almost entirely on configuration. So, you have to go through endless metric-by-metric configuration yourself. The result will reflect your skills, your experience, your understanding.
+
+Monitoring SaaS providers offer a very basic set of pre-configured metrics, dashboards and alarms. They assume that you will configure whatever else you may need. So, once more, the result will reflect your skills, your experience, your understanding.
+
+## What does netdata do?
+
+1. Metrics are auto-detected, so for 99% of the cases data collection works out of the box.
+2. Metrics are converted to human readable units, right after data collection, before storing them into the database.
+3. Metrics are structured, organized in charts, families and applications, so that they can be browsed.
+4. Dashboards are automatically generated, so all metrics are available for exploration immediately after installation.
+5. Dashboards are not just visualizing metrics; they are a tool, optimized for visual anomaly detection.
+6. Hundreds of pre-configured alarm templates are automatically attached to collected metrics.
+
+The result is that Netdata can be used immediately after installation!
+
+Netdata:
+
+- Helps engineers understand and learn what the metrics are.
+- Does not require any configuration. Of course there are thousands of options to tweak, but the defaults are pretty good for most systems.
+- Does not introduce any query languages or any other technology to be learned. Of course some familiarity with the tool is required, but nothing too complicated.
+- Includes all the community expertise and experience for monitoring systems and applications.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fimmediate-results&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/why-netdata/meaningful-presentation.md b/docs/why-netdata/meaningful-presentation.md
new file mode 100644
index 000000000..6414d023f
--- /dev/null
+++ b/docs/why-netdata/meaningful-presentation.md
@@ -0,0 +1,63 @@
+# Meaningful presentation
+
+Metrics are a lot more than name-value pairs over time. It is just not practical to require all users to have a deep understanding of all metrics for monitoring their systems and applications.
+
+## Why?
+
+There is a plethora of metrics. And each of them has a context, a meaning, a way to be interpreted.
+
+Traditionally, monitoring solutions instruct engineers to collect only the metrics they understand. This is a good strategy as long as you have a clear understanding of what you need, and you have the skills, the expertise and the experience to select them.
+
+For most people, this is an impossible task. It is just not practical to assume that any engineer will have a deep understanding of how the kernel works, how the networking stack works, how the system manages its memory, how it schedules processes, how web servers work, how databases work, etc.
+
+The result is that for most of the world, monitoring sucks. It is incomplete, inefficient, and in most of the cases only useful for providing an illusion that the infrastructure is being monitored. It is not! According to the [State of Monitoring 2017](http://start.bigpanda.io/state-of-monitoring-report-2017), only 11% of the companies are satisfied with their existing monitoring infrastructure, and on average they use 6-7 monitoring tools.
+
+But even if all the metrics are collected, an even bigger challenge is revealed: What to do with them? How to use them?
+
+The existing monitoring solutions assume the engineers will:
+
+- Design dashboards
+- Configure alarms
+- Use a query language to investigate issues
+
+However, all these have to be configured metric by metric.
+
+The monitoring industry believes there is this "IT Operations Hero", a person combining these abilities:
+
+1. Has a deep understanding of IT architectures and is a skillful SysAdmin.
+2. Is a superb Network Administrator (can even read and understand the Linux kernel networking stack).
+3. Is an exceptional database administrator.
+4. Is fluent in software engineering, capable of understanding the internal workings of applications.
+5. Masters Data Science, statistical algorithms and is fluent in writing advanced mathematical queries to reveal the meaning of metrics.
+
+Of course this person does not exist!
+
+## What do others do?
+
+Most solutions are based on a time-series database - a database that tracks name-value pairs over time.
+
+Data collection blindly collects metrics and stores them in the database, while dashboard editors query the database to visualize the metrics. They may also provide a query editor that users can use to query the database by hand.
+
+Of course, it is just not practical to work that way when the database has 10,000 unique metrics. Most of them will be just noise, not because they are not useful, but because no one understands them!
+
+So, they collect very limited metrics. Basic dashboards can be created with these metrics, but for any issue that needs troubleshooting, the monitoring system is just not adequate. It cannot help. So, engineers use the console to access the rest of the metrics and find the root cause.
+
+## What does netdata do?
+
+In Netdata, the meaning of metrics is incorporated into the database:
+
+1. All metrics are converted and stored in human-friendly units. This is a data-collection process, not a visualization process. For example, CPU utilization in Netdata is stored as a percentage, not as kernel ticks.
+
+2. All metrics are organized into human-friendly charts, sharing the same context and units (similar to what other monitoring solutions call `cardinality`). So, when Netdata developers collect metrics, they configure the correlation of the metrics right at data collection, and this is stored in the database too.
+
+3. All charts are then organized in families, and chart families are organized in applications. These structures provide the menu at the right side of Netdata dashboards for exploring the whole database.
+
+The result is a system that can be browsed by humans, even if the database has 100,000 unique metrics. It is pretty natural for everyone to browse them, understand their meaning and their scope.
+
+Of course, this process makes data collection significantly more time consuming. Netdata developers need to normalize, correlate and categorize every single metric Netdata collects.
+
+But it simplifies everything else. Data collection, the metrics database and the visualization are decoupled, so the query engine is simpler and the visualization is straightforward.
+
+Netdata goes a step further, enriching the dashboard with information that is useful for most people. To improve clarity and help users be more effective, Netdata includes right in the dashboard the community knowledge and expertise about the metrics, so that Netdata users can focus on solving their infrastructure problems, not on the technicalities of data collection and visualization.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Fmeaningful-presentation&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/docs/why-netdata/unlimited-metrics.md b/docs/why-netdata/unlimited-metrics.md
new file mode 100644
index 000000000..e35034a2b
--- /dev/null
+++ b/docs/why-netdata/unlimited-metrics.md
@@ -0,0 +1,44 @@
+# Unlimited metrics
+
+All metrics are important and all metrics should be available when you need them.
+
+## Why?
+
+Collecting all the metrics breaks the first rule of every monitoring textbook: "collect only the metrics you need", "collect only the metrics you understand".
+
+Unfortunately, this does not work! Filtering out most metrics is like reading a book by skipping most of its pages...
+ +For many people, monitoring is about: + +- Detecting outages +- Capacity planning + +However, **slowdowns are 10 times more common** compared to outages (check slide 14 of [Online Performance is Business Performance ](https://www.slideshare.net/KenGodskind/alertsitetrac) reported by Trac Research/AlertSite). Designing a monitoring system targeting only outages and capacity planning solves just a tiny part of the operational problems we face. Check also [Downtime vs. Slowtime: Which Hurts More?](https://dzone.com/articles/downtime-vs-slowtime-which-hurts-more). + +To troubleshoot a slowdown, a lot more metrics are needed. Actually all the metrics are needed, since the real cause of a slowdown is most probably quite complex. If we knew the possible reasons, chances are we would have fixed them before they become a problem. + +## What do others do? + +Most monitoring solutions, when they are able to detect something, provide just a hint (e.g. "hey, there is a 20% drop in requests per second over the last minute") and they expect us to use the console for determining the root cause. + +Of course this introduces a lot more problems: how to troubleshoot a slowdown using the console, if the slowdown lifetime is just a few seconds, randomly spread throughout the day? + +You can't! You will spend your entire day on the console, waiting for the problem to happen again while you are logged in. A blame war starts: developers blame the systems, sysadmins blame the hosting provider, someone says it is a DNS problem, another one believes it is network related, etc. We have all experienced this, multiple times... + +So, why do monitoring solutions and SaaS providers filter out metrics? + +They can't do otherwise! + +1. Centralization of metrics depends on metrics filtering, to control monitoring costs. Time-series databases limit the number of metrics collected, because the number of metrics influences their performance significantly. They get congested at scale. +2. It is a lot easier to provide an illusion of monitoring by using a few basic metrics. +3. Troubleshooting slowdowns is the hardest IT problem to solve, so most solutions just avoid it. + +## What does netdata do? + +Netdata collects, stores and visualizes everything, every single metric exposed by systems and applications. + +Due to Netdata's distributed nature, the number of metrics collected does not have any noticeable effect on the performance or the cost of the monitoring infrastructure. + +Of course, since netdata is also about [meaningful presentation](meaningful-presentation.md), the number of metrics makes Netdata development slower. We, the Netdata developers, need to have a good understanding of the metrics before adding them into Netdata. We need to organize the metrics, add information related to them, configure alarms for them, so that you, the Netdata users, will have the best out-of-the-box experience and all the information required to kill the console for troubleshooting slowdowns. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fwhy-netdata%2Funlimited-metrics&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/.keep b/health/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/health/Makefile.am b/health/Makefile.am index 829a41b3b..40592a964 100644 --- a/health/Makefile.am +++ b/health/Makefile.am @@ -16,12 +16,11 @@ dist_noinst_DATA = \ userhealthconfigdir=$(configdir)/health.d dist_userhealthconfig_DATA = \ - $(top_srcdir)/installer/.keep \ + .keep \ $(NULL) healthconfigdir=$(libconfigdir)/health.d dist_healthconfig_DATA = \ - $(top_srcdir)/installer/.keep \ health.d/adaptec_raid.conf \ health.d/apache.conf \ health.d/apcupsd.conf \ diff --git a/health/Makefile.in b/health/Makefile.in deleted file mode 100644 index 811f7f239..000000000 --- a/health/Makefile.in +++ /dev/null @@ -1,800 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = health -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_healthconfig_DATA) $(dist_noinst_DATA) \ - $(dist_userhealthconfig_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ 
- *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(healthconfigdir)" \ - "$(DESTDIR)$(userhealthconfigdir)" -DATA = $(dist_healthconfig_DATA) $(dist_noinst_DATA) \ - $(dist_userhealthconfig_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. 
-am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - notifications \ - $(NULL) - -CLEANFILES = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -userhealthconfigdir = $(configdir)/health.d -dist_userhealthconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - $(NULL) - -healthconfigdir = $(libconfigdir)/health.d -dist_healthconfig_DATA = \ - $(top_srcdir)/installer/.keep \ - health.d/adaptec_raid.conf \ - health.d/apache.conf \ - health.d/apcupsd.conf \ - health.d/backend.conf \ - health.d/bcache.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/boinc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/dockerd.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/linux_power_supply.conf \ - health.d/load.conf \ - health.d/mdstat.conf \ - health.d/megacli.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ 
- health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu health/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu health/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_healthconfigDATA: $(dist_healthconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(healthconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(healthconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(healthconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(healthconfigdir)" || exit $$?; \ - done - -uninstall-dist_healthconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_healthconfig_DATA)'; test -n "$(healthconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(healthconfigdir)'; $(am__uninstall_files_from_dir) -install-dist_userhealthconfigDATA: $(dist_userhealthconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_userhealthconfig_DATA)'; test -n "$(userhealthconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(userhealthconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(userhealthconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userhealthconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(userhealthconfigdir)" || exit $$?; \ - done - -uninstall-dist_userhealthconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_userhealthconfig_DATA)'; test -n "$(userhealthconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(userhealthconfigdir)'; $(am__uninstall_files_from_dir) - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. 
-# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(healthconfigdir)" "$(DESTDIR)$(userhealthconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: install-dist_healthconfigDATA \ - install-dist_userhealthconfigDATA - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-dist_healthconfigDATA \ - uninstall-dist_userhealthconfigDATA - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dist_healthconfigDATA \ - install-dist_userhealthconfigDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs installdirs-am \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \ - uninstall-am uninstall-dist_healthconfigDATA \ - uninstall-dist_userhealthconfigDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/health/README.md b/health/README.md index 5d68d752a..54f6a3e1f 100644 --- a/health/README.md +++ b/health/README.md @@ -9,8 +9,8 @@ netdata, since many charts are dynamically created during runtime (for example, chart tracking network interface packet drops, is automatically created on the first packet dropped). -Netdata also supports alarm **templates**, so that an alarm can be attached to all -the charts of the same context (i.e. all network interfaces, or all disks, or all mysql servers, etc.) +Netdata also supports alarm **templates**, so that an alarm can be attached to all the charts of the same context (i.e. all network interfaces, or all disks, or all mysql servers, etc.). + Each alarm can execute a single query to the database using statistical algorithms against past data, but alarms can be combined. So, if you need 2 queries in the database, you can combine @@ -145,7 +145,7 @@ This is useful when you centralize metrics from multiple hosts, to one netdata. This line is only used in alarm templates. It filters the charts. So, if you need to create an alarm template for a few of a kind of chart (a few of your disks, or a few of your network interfaces, or a few your mysql servers, etc), you can create an alarm template that would -normally be applied to all of them, and filter them by family. 
+normally be applied to all of them, and filter them by [family](../docs/Charts.md#families). + The format is: @@ -153,20 +153,13 @@ The format is: ``` families: SIMPLE PATTERN LIST ``` -Simple patterns list is a lists of space separated patterns. Use ` * ` as wildcard and ` ! ` -for a negative match. Processing is left to right, and on the first hit (positive or negative), -processing stops. - -So. `families: *` means, match anything, while `families: !bad*pattern* *` means anything -except `bad*pattern*` (where `*` is a wildcard to match any sequence of characters). - -The family of a chart is usually the submenu of the netdata dashboard it appears. +The simple pattern syntax and operation are explained in [simple patterns](../libnetdata/simple_pattern/). --- #### Alarm line `lookup` -This lines makes a database lookup to find a value. This result of this lookup is available as `$this`. +This line makes a database lookup to find a value. The result of this lookup is available as `$this`. The format is: @@ -349,6 +342,16 @@ delay: [[[up U] [down D] multiplier M] max X] their matching one) and a delay is in place. - All are reset to their defaults when the alarm switches state without a delay in place. +#### Alarm line `option` + +The only possible value for the `option` line is + +``` +option: no-clear-notification +``` + +For some alarms, we need to compare two time-frames to detect anomalies. For example, `health.d/httpcheck.conf` has an alarm template called `web_service_slow` that compares the average HTTP call response time over the last 3 minutes to the average over the last hour. It triggers a warning alarm when the average of the last 3 minutes is twice the average of the last hour. In such cases, it is easy to trigger the alarm, but difficult to tell when the alarm is cleared. As time passes, the newest window moves into the older one, so the average response time of the last hour will keep increasing. Eventually, the comparison will find the averages in the two time-frames close enough to clear the alarm. However, the issue was not resolved; it's just a matter of the newer data "polluting" the old. For such alarms, it's a good idea to tell Netdata not to clear the notification, by using the `no-clear-notification` option. + --- ### Expressions @@ -419,10 +422,19 @@ Which in turn, results in the following behavior: ### Variables -netdata supports 3 new internal indexes for variables that will be used in health monitoring: +You can find all the variables that can be used for a given chart, using +`http://your.netdata.ip:19999/api/v1/alarm_variables?chart=CHART_NAME` +Example: [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). + +_Hint: If you don't know how to find the CHART_NAME, you can read about it [here](../docs/Charts.md#charts)._ - - **chart local variables**. All the dimensions of the chart are exposed as local variables. - All chart alarms names are exposed as variables too. + +Netdata supports 3 internal indexes for variables that will be used in health monitoring. +
The variables below can be used in both chart alarms and context templates. +Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in templates for charts belonging to the same [context](../docs/Charts.md#contexts). The reason is that all charts of a given context are essentially identical, with the only difference being the [family](../docs/Charts.md#families) that identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway, unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families). +
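+For example, a template like the following attaches to every chart of the `disk.space` context, optionally narrowed down by family. This is an illustrative sketch in the spirit of the stock `health.d/disks.conf`, not a copy of a stock configuration:
+
+```
+# hypothetical template, for illustration only
+template: disk_almost_full
+      on: disk.space
+families: sda* sdb*
+    calc: $used * 100 / ($avail + $used)
+   units: %
+   every: 1m
+    warn: $this > 80
+    crit: $this > 95
+```
+
+Here `$used` and `$avail` are the dimensions of each matching chart, exposed as the chart-local variables described below.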
+ + - **chart local variables**. All the dimensions of the chart are exposed as local variables. The value of $this for the other configured alarms of the chart also appears, under the name of each configured alarm. Charts also define a few special variables: @@ -448,20 +460,15 @@ netdata supports 3 new internal indexes for variables that will be used in healt - **special variables*** are: - - `this`, which is resolved to the value of the current alarm. + - `$this`, which is resolved to the value of the current alarm. - - `status`, which is resolved to the current status of the alarm (the current = the last + - `$status`, which is resolved to the current status of the alarm (the current = the last status, i.e. before the current database lookup and the evaluation of the `calc` line). This values can be compared with `$REMOVED`, `$UNINITIALIZED`, `$UNDEFINED`, `$CLEAR`, - `$WARNING`, `$CRITICAL`. These values are incremental, ie. `$status > $CLEAL` works as + `$WARNING`, `$CRITICAL`. These values are incremental, i.e. `$status > $CLEAR` works as expected. - - `now`, which is resolved to current unix timestamp. - -You can find all the variables that can be used for a given chart, using -`http://your.netdata.ip:19999/api/v1/alarm_variables?chart=NAME`. -This will dump all the indexes from the chart's perspective. -Example: [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). + - `$now`, which is resolved to the current unix timestamp. ## Alarm Statuses @@ -646,3 +653,11 @@ You can find the context of charts by looking up the chart in either You can find how netdata interpreted the expressions by examining the alarm at `http://your.netdata:19999/api/v1/alarms?all`. For each expression, netdata will return the expression as given in its config file, and the same expression with additional parentheses added to indicate the evaluation flow of the expression. +## Disabling health checks or silencing notifications at runtime + +The health checks can be controlled at runtime via the [health management api](../web/api/health/#health-management-api). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() + + + diff --git a/health/health.c b/health/health.c index ae0c464b1..f92a1ba6b 100644 --- a/health/health.c +++ b/health/health.c @@ -2,6 +2,11 @@ #include "health.h" +struct health_cmdapi_thread_status { + int status; + struct rusage rusage; +}; + unsigned int default_health_enabled = 1; // ---------------------------------------------------------------------------- @@ -147,13 +153,41 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { } } + // Check if alarm notifications are silenced + if (ae->flags & HEALTH_ENTRY_FLAG_SILENCED) { + info("Health not sending notification for alarm '%s.%s' status %s (command API has disabled notifications)", ae->chart, ae->name, rrdcalc_status2string(ae->new_status)); + goto done; + } + static char command_to_run[ALARM_EXEC_COMMAND_LENGTH + 1]; pid_t command_pid; const char *exec = (ae->exec) ? ae->exec : host->health_default_exec; const char *recipient = (ae->recipient) ?
ae->recipient : host->health_default_recipient; - snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s'", + int n_warn=0, n_crit=0; + RRDCALC *rc; + EVAL_EXPRESSION *expr=NULL; + + for(rc = host->alarms; rc ; rc = rc->next) { + if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) { + n_warn++; + if (ae->alarm_id == rc->id) + expr=rc->warning; + } else if (unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) { + n_crit++; + if (ae->alarm_id == rc->id) + expr=rc->critical; + } else if (unlikely(rc->status == RRDCALC_STATUS_CLEAR)) { + if (ae->alarm_id == rc->id) + expr=rc->warning; + } + } + + snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d'", exec, recipient, host->registry_hostname, @@ -162,7 +196,7 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { ae->alarm_event_id, (unsigned long)ae->when, ae->name, - ae->chart?ae->chart:"NOCAHRT", + ae->chart?ae->chart:"NOCHART", ae->family?ae->family:"NOFAMILY", rrdcalc_status2string(ae->new_status), rrdcalc_status2string(ae->old_status), @@ -174,7 +208,11 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { ae->units?ae->units:"", ae->info?ae->info:"", ae->new_value_string, - ae->old_value_string + ae->old_value_string, + (expr && expr->source)?expr->source:"NOSOURCE", + (expr && expr->error_msg)?buffer_tostring(expr->error_msg):"NOERRMSG", + n_warn, + n_crit ); ae->flags |= HEALTH_ENTRY_FLAG_EXEC_RUN; @@ -355,6 +393,67 @@ static void health_main_cleanup(void *ptr) { static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; } +SILENCE_TYPE check_silenced(RRDCALC *rc, char* host, SILENCERS *silencers) { + SILENCER *s; + debug(D_HEALTH, "Checking if alarm was silenced via the command API. Alarm info name:%s context:%s chart:%s host:%s family:%s", + rc->name, (rc->rrdset)?rc->rrdset->context:"", rc->chart, host, (rc->rrdset)?rc->rrdset->family:""); + + for (s = silencers->silencers; s!=NULL; s=s->next){ + if ( + (!s->alarms_pattern || (rc->name && s->alarms_pattern && simple_pattern_matches(s->alarms_pattern,rc->name))) && + (!s->contexts_pattern || (rc->rrdset && rc->rrdset->context && s->contexts_pattern && simple_pattern_matches(s->contexts_pattern,rc->rrdset->context))) && + (!s->hosts_pattern || (host && s->hosts_pattern && simple_pattern_matches(s->hosts_pattern,host))) && + (!s->charts_pattern || (rc->chart && s->charts_pattern && simple_pattern_matches(s->charts_pattern,rc->chart))) && + (!s->families_pattern || (rc->rrdset && rc->rrdset->family && s->families_pattern && simple_pattern_matches(s->families_pattern,rc->rrdset->family))) + ) { + debug(D_HEALTH, "Alarm matches command API silence entry %s:%s:%s:%s:%s", s->alarms,s->charts, s->contexts, s->hosts, s->families); + if (unlikely(silencers->stype == STYPE_NONE)) { + debug(D_HEALTH, "Alarm %s matched a silence entry, but no SILENCE or DISABLE command was issued via the command API. 
The match has no effect.", rc->name); + } else { + debug(D_HEALTH, "Alarm %s via the command API - name:%s context:%s chart:%s host:%s family:%s" + , (silencers->stype==STYPE_DISABLE_ALARMS)?"Disabled":"Silenced" + , rc->name + , (rc->rrdset)?rc->rrdset->context:"" + , rc->chart + , host + , (rc->rrdset)?rc->rrdset->family:"" + ); + } + return silencers->stype; + } + } + return STYPE_NONE; +} + +int update_disabled_silenced(RRDHOST *host, RRDCALC *rc) { + uint32_t rrdcalc_flags_old = rc->rrdcalc_flags; + // Clear the flags + rc->rrdcalc_flags &= ~(RRDCALC_FLAG_DISABLED | RRDCALC_FLAG_SILENCED); + if (unlikely(silencers->all_alarms)) { + if (silencers->stype == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED; + else if (silencers->stype == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED; + } else { + SILENCE_TYPE st = check_silenced(rc, host->hostname, silencers); + if (st == STYPE_DISABLE_ALARMS) rc->rrdcalc_flags |= RRDCALC_FLAG_DISABLED; + else if (st == STYPE_SILENCE_NOTIFICATIONS) rc->rrdcalc_flags |= RRDCALC_FLAG_SILENCED; + } + + if (rrdcalc_flags_old != rc->rrdcalc_flags) { + info("Alarm silencing changed for host '%s' alarm '%s': Disabled %s->%s Silenced %s->%s", + host->hostname, + rc->name, + (rrdcalc_flags_old & RRDCALC_FLAG_DISABLED)?"true":"false", + (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false", + (rrdcalc_flags_old & RRDCALC_FLAG_SILENCED)?"true":"false", + (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false" + ); + } + if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED) + return 1; + else + return 0; +} + void *health_main(void *ptr) { netdata_thread_cleanup_push(health_main_cleanup, ptr); @@ -365,371 +464,338 @@ void *health_main(void *ptr) { time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60); unsigned int loop = 0; - while(!netdata_exit) { - loop++; - debug(D_HEALTH, "Health monitoring iteration no %u started", loop); - - int runnable = 0, apply_hibernation_delay = 0; - time_t next_run = now + min_run_every; - RRDCALC *rc; - - if(unlikely(check_if_resumed_from_suspention())) { - apply_hibernation_delay = 1; - - info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension." - , hibernation_delay - ); - } - - rrd_rdlock(); - - RRDHOST *host; - rrdhost_foreach_read(host) { - if(unlikely(!host->health_enabled)) - continue; - - if(unlikely(apply_hibernation_delay)) { - info("Postponing health checks for %ld seconds, on host '%s'." 
- , hibernation_delay - , host->hostname - ); + silencers = mallocz(sizeof(SILENCERS)); + silencers->all_alarms=0; + silencers->stype=STYPE_NONE; + silencers->silencers=NULL; - host->health_delay_up_to = now + hibernation_delay; - } - - if(unlikely(host->health_delay_up_to)) { - if(unlikely(now < host->health_delay_up_to)) - continue; - - info("Resuming health checks on host '%s'.", host->hostname); - host->health_delay_up_to = 0; - } - - rrdhost_rdlock(host); - - // the first loop is to lookup values from the db - for(rc = host->alarms; rc; rc = rc->next) { - if(unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) { - if(unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)) - rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE; - continue; - } - - runnable++; - rc->old_value = rc->value; - rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE; - - // ------------------------------------------------------------ - // if there is database lookup, do it - - if(unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) { - /* time_t old_db_timestamp = rc->db_before; */ - int value_is_null = 0; - - int ret = rrdset2value_api_v1(rc->rrdset - , NULL - , &rc->value - , rc->dimensions - , 1 - , rc->after - , rc->before - , rc->group - , 0 - , rc->options - , &rc->db_after - , &rc->db_before - , &value_is_null - ); - - if(unlikely(ret != 200)) { - // database lookup failed - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup returned error %d" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , ret - ); - } - else - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR; - - /* - RRDCALC_FLAG_DB_STALE not currently used - if (unlikely(old_db_timestamp == rc->db_before)) { - // database is stale - - debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); - - if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) { - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE; - error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); - } - } - else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE)) - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE; - */ - - if(unlikely(value_is_null)) { - // collected value is null - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - ); - } - else - rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': database lookup gave value " CALCULATED_NUMBER_FORMAT - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->value - ); - } - - // ------------------------------------------------------------ - // if there is calculation expression, run it - - if(unlikely(rc->calculation)) { - if(unlikely(!expression_evaluate(rc->calculation))) { - // calculation failed - rc->value = NAN; - rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s" - , host->hostname - , rc->chart ? 
rc->chart : "NOCHART" - , rc->name - , rc->calculation->parsed_as - , buffer_tostring(rc->calculation->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR; - - debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->calculation->parsed_as - , rc->calculation->result - , buffer_tostring(rc->calculation->error_msg) - , rc->source - ); - - rc->value = rc->calculation->result; - - if(rc->local) rc->local->last_updated = now; - if(rc->family) rc->family->last_updated = now; - if(rc->hostid) rc->hostid->last_updated = now; - if(rc->hostname) rc->hostname->last_updated = now; - } - } - } - rrdhost_unlock(host); - - if(unlikely(runnable && !netdata_exit)) { - rrdhost_rdlock(host); - - for(rc = host->alarms; rc; rc = rc->next) { - if(unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))) - continue; - - RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED; - RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED; - - // -------------------------------------------------------- - // check the warning expression - - if(likely(rc->warning)) { - if(unlikely(!expression_evaluate(rc->warning))) { - // calculation failed - rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , buffer_tostring(rc->warning->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR; - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': warning expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , rc->warning->result - , buffer_tostring(rc->warning->error_msg) - , rc->source - ); - warning_status = rrdcalc_value2status(rc->warning->result); - } - } - - // -------------------------------------------------------- - // check the critical expression - - if(likely(rc->critical)) { - if(unlikely(!expression_evaluate(rc->critical))) { - // calculation failed - rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR; - - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s" - , host->hostname - , rc->chart ? rc->chart : "NOCHART" - , rc->name - , buffer_tostring(rc->critical->error_msg) - ); - } - else { - rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR; - debug(D_HEALTH - , "Health on host '%s', alarm '%s.%s': critical expression gave value " CALCULATED_NUMBER_FORMAT ": %s (source: %s)" - , host->hostname - , rc->chart ? 
rc->chart : "NOCHART" - , rc->name - , rc->critical->result - , buffer_tostring(rc->critical->error_msg) - , rc->source - ); - critical_status = rrdcalc_value2status(rc->critical->result); - } - } - - // -------------------------------------------------------- - // decide the final alarm status - - RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED; - - switch(warning_status) { - case RRDCALC_STATUS_CLEAR: - status = RRDCALC_STATUS_CLEAR; - break; - - case RRDCALC_STATUS_RAISED: - status = RRDCALC_STATUS_WARNING; - break; - - default: - break; - } - - switch(critical_status) { - case RRDCALC_STATUS_CLEAR: - if(status == RRDCALC_STATUS_UNDEFINED) - status = RRDCALC_STATUS_CLEAR; - break; - - case RRDCALC_STATUS_RAISED: - status = RRDCALC_STATUS_CRITICAL; - break; - - default: - break; - } - - // -------------------------------------------------------- - // check if the new status and the old differ - - if(status != rc->status) { - int delay = 0; - - // apply trigger hysteresis - - if(now > rc->delay_up_to_timestamp) { - rc->delay_up_current = rc->delay_up_duration; - rc->delay_down_current = rc->delay_down_duration; - rc->delay_last = 0; - rc->delay_up_to_timestamp = 0; - } - else { - rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier); - if(rc->delay_up_current > rc->delay_max_duration) - rc->delay_up_current = rc->delay_max_duration; - - rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier); - if(rc->delay_down_current > rc->delay_max_duration) - rc->delay_down_current = rc->delay_max_duration; - } - - if(status > rc->status) - delay = rc->delay_up_current; - else - delay = rc->delay_down_current; - - // COMMENTED: because we do need to send raising alarms - // if(now + delay < rc->delay_up_to_timestamp) - // delay = (int)(rc->delay_up_to_timestamp - now); - - rc->delay_last = delay; - rc->delay_up_to_timestamp = now + delay; - - // add the alarm into the log - - health_alarm_log( - host - , rc->id - , rc->next_event_id++ - , now - , rc->name - , rc->rrdset->id - , rc->rrdset->family - , rc->exec - , rc->recipient - , now - rc->last_status_change - , rc->old_value - , rc->value - , rc->status - , status - , rc->source - , rc->units - , rc->info - , rc->delay_last - , (rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION) ? 
HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0 - ); - - rc->last_status_change = now; - rc->status = status; - } - - rc->last_updated = now; - rc->next_update = now + rc->update_every; + while(!netdata_exit) { + loop++; + debug(D_HEALTH, "Health monitoring iteration no %u started", loop); - if(next_run > rc->next_update) - next_run = rc->next_update; - } + int runnable = 0, apply_hibernation_delay = 0; + time_t next_run = now + min_run_every; + RRDCALC *rc; + + if (unlikely(check_if_resumed_from_suspention())) { + apply_hibernation_delay = 1; + + info("Postponing alarm checks for %ld seconds, because it seems that the system was just resumed from suspension.", + hibernation_delay + ); + } + + if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) { + static int logged=0; + if (!logged) { + info("Skipping health checks, because all alarms are disabled via a %s command.", + HEALTH_CMDAPI_CMD_DISABLEALL); + logged = 1; + } + } + + rrd_rdlock(); + + RRDHOST *host; + rrdhost_foreach_read(host) { + if (unlikely(!host->health_enabled)) + continue; + + if (unlikely(apply_hibernation_delay)) { + + info("Postponing health checks for %ld seconds, on host '%s'.", hibernation_delay, host->hostname + ); + + host->health_delay_up_to = now + hibernation_delay; + } + + if (unlikely(host->health_delay_up_to)) { + if (unlikely(now < host->health_delay_up_to)) + continue; + + info("Resuming health checks on host '%s'.", host->hostname); + host->health_delay_up_to = 0; + } + + rrdhost_rdlock(host); + + // the first loop is to lookup values from the db + for (rc = host->alarms; rc; rc = rc->next) { + + if (update_disabled_silenced(host, rc)) + continue; + + if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) { + if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE)) + rc->rrdcalc_flags &= ~RRDCALC_FLAG_RUNNABLE; + continue; + } + + runnable++; + rc->old_value = rc->value; + rc->rrdcalc_flags |= RRDCALC_FLAG_RUNNABLE; + + // ------------------------------------------------------------ + // if there is database lookup, do it + + if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) { + /* time_t old_db_timestamp = rc->db_before; */ + int value_is_null = 0; + + int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rc->dimensions, 1, rc->after, + rc->before, rc->group, 0, rc->options, &rc->db_after, + &rc->db_before, &value_is_null + ); + + if (unlikely(ret != 200)) { + // database lookup failed + rc->value = NAN; + rc->rrdcalc_flags |= RRDCALC_FLAG_DB_ERROR; + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d", + host->hostname, rc->chart ? 
rc->chart : "NOCHART", rc->name, ret + ); + } else + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_ERROR; + + /* - RRDCALC_FLAG_DB_STALE not currently used + if (unlikely(old_db_timestamp == rc->db_before)) { + // database is stale + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); + + if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) { + rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE; + error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name); + } + } + else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE)) + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE; + */ + + if (unlikely(value_is_null)) { + // collected value is null + rc->value = NAN; + rc->rrdcalc_flags |= RRDCALC_FLAG_DB_NAN; + + debug(D_HEALTH, + "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)", + host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name + ); + } else + rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_NAN; + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value " + CALCULATED_NUMBER_FORMAT, host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, + rc->value + ); + } + + // ------------------------------------------------------------ + // if there is calculation expression, run it + + if (unlikely(rc->calculation)) { + if (unlikely(!expression_evaluate(rc->calculation))) { + // calculation failed + rc->value = NAN; + rc->rrdcalc_flags |= RRDCALC_FLAG_CALC_ERROR; + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s", + host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, + rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg) + ); + } else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_CALC_ERROR; + + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value " + CALCULATED_NUMBER_FORMAT + ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, + rc->calculation->parsed_as, rc->calculation->result, + buffer_tostring(rc->calculation->error_msg), rc->source + ); + + rc->value = rc->calculation->result; + + if (rc->local) rc->local->last_updated = now; + if (rc->family) rc->family->last_updated = now; + if (rc->hostid) rc->hostid->last_updated = now; + if (rc->hostname) rc->hostname->last_updated = now; + } + } + } + + rrdhost_unlock(host); + + if (unlikely(runnable && !netdata_exit)) { + rrdhost_rdlock(host); + + for (rc = host->alarms; rc; rc = rc->next) { + if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_RUNNABLE))) + continue; + + if (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED) { + continue; + } + RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED; + RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED; + + // -------------------------------------------------------- + // check the warning expression + + if (likely(rc->warning)) { + if (unlikely(!expression_evaluate(rc->warning))) { + // calculation failed + rc->rrdcalc_flags |= RRDCALC_FLAG_WARN_ERROR; + + debug(D_HEALTH, + "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s", + host->hostname, rc->chart ? 
rc->chart : "NOCHART", rc->name, + buffer_tostring(rc->warning->error_msg) + ); + } else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_WARN_ERROR; + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value " + CALCULATED_NUMBER_FORMAT + ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", + rc->name, rc->warning->result, buffer_tostring(rc->warning->error_msg), rc->source + ); + warning_status = rrdcalc_value2status(rc->warning->result); + } + } + + // -------------------------------------------------------- + // check the critical expression + + if (likely(rc->critical)) { + if (unlikely(!expression_evaluate(rc->critical))) { + // calculation failed + rc->rrdcalc_flags |= RRDCALC_FLAG_CRIT_ERROR; + + debug(D_HEALTH, + "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s", + host->hostname, rc->chart ? rc->chart : "NOCHART", rc->name, + buffer_tostring(rc->critical->error_msg) + ); + } else { + rc->rrdcalc_flags &= ~RRDCALC_FLAG_CRIT_ERROR; + debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value " + CALCULATED_NUMBER_FORMAT + ": %s (source: %s)", host->hostname, rc->chart ? rc->chart : "NOCHART", + rc->name, rc->critical->result, buffer_tostring(rc->critical->error_msg), + rc->source + ); + critical_status = rrdcalc_value2status(rc->critical->result); + } + } + + // -------------------------------------------------------- + // decide the final alarm status + + RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED; + + switch (warning_status) { + case RRDCALC_STATUS_CLEAR: + status = RRDCALC_STATUS_CLEAR; + break; + + case RRDCALC_STATUS_RAISED: + status = RRDCALC_STATUS_WARNING; + break; + + default: + break; + } + + switch (critical_status) { + case RRDCALC_STATUS_CLEAR: + if (status == RRDCALC_STATUS_UNDEFINED) + status = RRDCALC_STATUS_CLEAR; + break; + + case RRDCALC_STATUS_RAISED: + status = RRDCALC_STATUS_CRITICAL; + break; + + default: + break; + } + + // -------------------------------------------------------- + // check if the new status and the old differ + + if (status != rc->status) { + int delay = 0; + + // apply trigger hysteresis + + if (now > rc->delay_up_to_timestamp) { + rc->delay_up_current = rc->delay_up_duration; + rc->delay_down_current = rc->delay_down_duration; + rc->delay_last = 0; + rc->delay_up_to_timestamp = 0; + } else { + rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier); + if (rc->delay_up_current > rc->delay_max_duration) + rc->delay_up_current = rc->delay_max_duration; + + rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier); + if (rc->delay_down_current > rc->delay_max_duration) + rc->delay_down_current = rc->delay_max_duration; + } + + if (status > rc->status) + delay = rc->delay_up_current; + else + delay = rc->delay_down_current; + + // COMMENTED: because we do need to send raising alarms + // if(now + delay < rc->delay_up_to_timestamp) + // delay = (int)(rc->delay_up_to_timestamp - now); + + rc->delay_last = delay; + rc->delay_up_to_timestamp = now + delay; + + health_alarm_log( + host, rc->id, rc->next_event_id++, now, rc->name, rc->rrdset->id, + rc->rrdset->family, rc->exec, rc->recipient, now - rc->last_status_change, + rc->old_value, rc->value, rc->status, status, rc->source, rc->units, rc->info, + rc->delay_last, + ( + ((rc->options & RRDCALC_FLAG_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) | + ((rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)? 
HEALTH_ENTRY_FLAG_SILENCED : 0) + ) + + ); + + rc->last_status_change = now; + rc->status = status; + } + + rc->last_updated = now; + rc->next_update = now + rc->update_every; + + if (next_run > rc->next_update) + next_run = rc->next_update; + } + + rrdhost_unlock(host); + } - rrdhost_unlock(host); - } + if (unlikely(netdata_exit)) + break; - if(unlikely(netdata_exit)) - break; + // execute notifications + // and cleanup + health_alarm_log_process(host); - // execute notifications - // and cleanup - health_alarm_log_process(host); + if (unlikely(netdata_exit)) + break; - if(unlikely(netdata_exit)) - break; + } /* rrdhost_foreach */ - } /* rrdhost_foreach */ + rrd_unlock(); - rrd_unlock(); if(unlikely(netdata_exit)) break; diff --git a/health/health.d/linux_power_supply.conf b/health/health.d/linux_power_supply.conf index 27a172a14..745d2c3dd 100644 --- a/health/health.d/linux_power_supply.conf +++ b/health/health.d/linux_power_supply.conf @@ -1,7 +1,7 @@ # Alert on low battery capacity. template: linux_power_supply_capacity - on: power_supply.capacity + on: powersupply.capacity calc: $capacity units: % every: 10s diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf index 0f5f2837e..a53ec7a56 100644 --- a/health/health.d/mdstat.conf +++ b/health/health.d/mdstat.conf @@ -25,3 +25,13 @@ template: mdstat_mismatch_cnt crit: $this > 0 info: Mismatch count! to: sysadmin + +template: mdstat_nonredundant_last_collected + on: md.nonredundant + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + info: number of seconds since the last successful data collection + to: sysadmin \ No newline at end of file diff --git a/health/health.d/web_log.conf b/health/health.d/web_log.conf index d8be88b47..031adc2ea 100644 --- a/health/health.d/web_log.conf +++ b/health/health.d/web_log.conf @@ -85,6 +85,36 @@ families: * info: the ratio of HTTP internal server errors (5xx), over the last minute to: webmaster +# unmatched lines + +# the following alarms trigger only when there are enough data. +# we assume there are enough data when: +# +# $1m_total_requests > 120 +# +# i.e. when there are at least 120 requests during the last minute + +template: 1m_total_requests + on: web_log.response_codes +families: * + lookup: sum -1m unaligned + calc: ($this == 0)?(1):($this) + units: requests + every: 10s + info: the sum of all HTTP requests over the last minute + +template: 1m_unmatched +on: web_log.response_codes +families: * + lookup: sum -1m unaligned of unmatched + calc: $this * 100 / $1m_total_requests + units: % + every: 10s + warn: ($1m_total_requests > 120) ? ($this > 1) : ( 0 ) + crit: ($1m_total_requests > 120) ? 
($this > 5) : ( 0 ) + delay: up 1m down 5m multiplier 1.5 max 1h + info: the ratio of unmatched lines, over the last minute + to: webmaster # ----------------------------------------------------------------------------- # web slow diff --git a/health/health.h b/health/health.h index ff7a4d9bf..ff10fd6d7 100644 --- a/health/health.h +++ b/health/health.h @@ -22,9 +22,74 @@ extern unsigned int default_health_enabled; #define HEALTH_ENTRY_FLAG_UPDATED 0x00000002 #define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004 #define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008 +#define HEALTH_ENTRY_FLAG_SILENCED 0x00000010 + #define HEALTH_ENTRY_FLAG_SAVED 0x10000000 #define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 +#ifndef HEALTH_LISTEN_PORT +#define HEALTH_LISTEN_PORT 19998 +#endif + +#ifndef HEALTH_LISTEN_BACKLOG +#define HEALTH_LISTEN_BACKLOG 4096 +#endif + +#define HEALTH_ALARM_KEY "alarm" +#define HEALTH_TEMPLATE_KEY "template" +#define HEALTH_ON_KEY "on" +#define HEALTH_CONTEXT_KEY "context" +#define HEALTH_CHART_KEY "chart" +#define HEALTH_HOST_KEY "hosts" +#define HEALTH_OS_KEY "os" +#define HEALTH_FAMILIES_KEY "families" +#define HEALTH_LOOKUP_KEY "lookup" +#define HEALTH_CALC_KEY "calc" +#define HEALTH_EVERY_KEY "every" +#define HEALTH_GREEN_KEY "green" +#define HEALTH_RED_KEY "red" +#define HEALTH_WARN_KEY "warn" +#define HEALTH_CRIT_KEY "crit" +#define HEALTH_EXEC_KEY "exec" +#define HEALTH_RECIPIENT_KEY "to" +#define HEALTH_UNITS_KEY "units" +#define HEALTH_INFO_KEY "info" +#define HEALTH_DELAY_KEY "delay" +#define HEALTH_OPTIONS_KEY "options" + +typedef struct silencer { + char *alarms; + SIMPLE_PATTERN *alarms_pattern; + + char *hosts; + SIMPLE_PATTERN *hosts_pattern; + + char *contexts; + SIMPLE_PATTERN *contexts_pattern; + + char *charts; + SIMPLE_PATTERN *charts_pattern; + + char *families; + SIMPLE_PATTERN *families_pattern; + + struct silencer *next; +} SILENCER; + +typedef enum silence_type { + STYPE_NONE, + STYPE_DISABLE_ALARMS, + STYPE_SILENCE_NOTIFICATIONS +} SILENCE_TYPE; + +typedef struct silencers { + int all_alarms; + SILENCE_TYPE stype; + SILENCER *silencers; +} SILENCERS; + +SILENCERS *silencers; + extern void health_init(void); extern void *health_main(void *ptr); @@ -62,8 +127,7 @@ extern void health_alarm_log( const char *units, const char *info, int delay, - uint32_t flags -); + uint32_t flags); extern void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath); extern char *health_user_config_dir(void); @@ -73,4 +137,6 @@ extern void health_alarm_log_free(RRDHOST *host); extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae); +extern void *health_cmdapi_thread(void *ptr); + #endif //NETDATA_HEALTH_H diff --git a/health/health_config.c b/health/health_config.c index d4cf78d97..35fde90bc 100644 --- a/health/health_config.c +++ b/health/health_config.c @@ -853,6 +853,9 @@ static int health_readfile(const char *filename, void *data) { } void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath) { - if(unlikely(!host->health_enabled)) return; + if(unlikely(!host->health_enabled)) { + debug(D_HEALTH, "CONFIG health is not enabled for host '%s'", host->hostname); + return; + } recursive_config_double_dir_load(user_path, stock_path, subpath, health_readfile, (void *) host, 0); } diff --git a/health/health_json.c b/health/health_json.c index a049dc1b2..781132447 100644 --- a/health/health_json.c +++ b/health/health_json.c @@ -43,6 +43,7 @@ static inline void
health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, R "\t\t\"updates_id\": %u,\n" "\t\t\"value_string\": \"%s\",\n" "\t\t\"old_value_string\": \"%s\",\n" + "\t\t\"silenced\": \"%s\",\n" , host->hostname , ae->unique_id , ae->alarm_id @@ -70,6 +71,7 @@ static inline void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, R , ae->updates_id , ae->new_value_string , ae->old_value_string + , (ae->flags & HEALTH_ENTRY_FLAG_SILENCED)?"true":"false" ); health_string2json(wb, "\t\t", "info", ae->info?ae->info:"", ",\n"); @@ -120,6 +122,8 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC "\t\t\t\"chart\": \"%s\",\n" "\t\t\t\"family\": \"%s\",\n" "\t\t\t\"active\": %s,\n" + "\t\t\t\"disabled\": %s,\n" + "\t\t\t\"silenced\": %s,\n" "\t\t\t\"exec\": \"%s\",\n" "\t\t\t\"recipient\": \"%s\",\n" "\t\t\t\"source\": \"%s\",\n" @@ -143,6 +147,8 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC , rc->chart , (rc->rrdset && rc->rrdset->family)?rc->rrdset->family:"" , (rc->rrdset)?"true":"false" + , (rc->rrdcalc_flags & RRDCALC_FLAG_DISABLED)?"true":"false" + , (rc->rrdcalc_flags & RRDCALC_FLAG_SILENCED)?"true":"false" , rc->exec?rc->exec:host->health_default_exec , rc->recipient?rc->recipient:host->health_default_recipient , rc->source diff --git a/health/health_log.c b/health/health_log.c index dd51be2af..009e42673 100644 --- a/health/health_log.c +++ b/health/health_log.c @@ -396,7 +396,6 @@ inline void health_alarm_log( ae->duration = duration; ae->delay = delay; ae->delay_up_to_timestamp = when + delay; - ae->flags |= flags; if(ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL) diff --git a/health/notifications/Makefile.in b/health/notifications/Makefile.in deleted file mode 100644 index 05a5814fc..000000000 --- a/health/notifications/Makefile.in +++ /dev/null @@ -1,754 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
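The final-status decision added to health.c above reduces to a small pure function: the warning expression may set CLEAR or WARNING, and the critical expression may escalate to CRITICAL, but a clear critical only fills in CLEAR when nothing else fired. A minimal standalone sketch of that rule (the enum names and the split between per-expression verdicts and final status are ours for clarity; netdata uses RRDCALC_STATUS_* values for both):

```c
typedef enum { EV_UNDEFINED, EV_CLEAR, EV_RAISED } EVAL;                /* per-expression verdict */
typedef enum { AL_UNDEFINED, AL_CLEAR, AL_WARNING, AL_CRITICAL } ALARM; /* final alarm status */

static ALARM resolve_status(EVAL warning, EVAL critical) {
    ALARM status = AL_UNDEFINED;

    switch (warning) {
        case EV_CLEAR:  status = AL_CLEAR;   break;
        case EV_RAISED: status = AL_WARNING; break;
        default: break;
    }

    switch (critical) {
        case EV_CLEAR:
            /* a clear critical must not mask a raised warning */
            if (status == AL_UNDEFINED) status = AL_CLEAR;
            break;
        case EV_RAISED:
            status = AL_CRITICAL; /* critical always wins */
            break;
        default: break;
    }

    return status;
}
```

This ordering is also why the delay logic can use a plain `status > rc->status` comparison to detect escalation.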
- -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -# THIS IS NOT A COMPLETE Makefile -# IT IS INCLUDED BY ITS PARENT'S Makefile.am -# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?)
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc \ - $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc \ - $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc \ - $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc \ - $(srcdir)/kavenegar/Makefile.inc \ - $(srcdir)/messagebird/Makefile.inc \ - $(srcdir)/pagerduty/Makefile.inc \ - $(srcdir)/pushbullet/Makefile.inc \ - $(srcdir)/pushover/Makefile.inc \ - $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc \ - $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc \ - $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc \ - $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_plugins_SCRIPTS) $(dist_libconfig_DATA) \ - $(dist_noinst_DATA) -subdir = health/notifications -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 
-am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(pluginsdir)" \ - "$(DESTDIR)$(libconfigdir)" -SCRIPTS = $(dist_plugins_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ 
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - alarm-notify.sh \ - $(NULL) - -SUFFIXES = .in -dist_libconfig_DATA = \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - $(NULL) - -dist_plugins_SCRIPTS = \ - alarm-notify.sh \ - alarm-email.sh \ - alarm-test.sh \ - $(NULL) - - -# install these files -dist_noinst_DATA = alarm-notify.sh.in README.md $(NULL) \ - alerta/README.md alerta/Makefile.inc $(NULL) awssns/README.md \ - awssns/Makefile.inc $(NULL) discord/README.md \ - discord/Makefile.inc $(NULL) email/README.md \ - email/Makefile.inc $(NULL) flock/README.md flock/Makefile.inc \ - $(NULL) irc/README.md irc/Makefile.inc $(NULL) \ - kavenegar/README.md kavenegar/Makefile.inc $(NULL) \ - messagebird/README.md messagebird/Makefile.inc $(NULL)
\ - pagerduty/README.md pagerduty/Makefile.inc $(NULL) \ - pushbullet/README.md pushbullet/Makefile.inc $(NULL) \ - pushover/README.md pushover/Makefile.inc $(NULL) \ - rocketchat/README.md rocketchat/Makefile.inc $(NULL) \ - slack/README.md slack/Makefile.inc $(NULL) syslog/README.md \ - syslog/Makefile.inc $(NULL) telegram/README.md \ - telegram/Makefile.inc $(NULL) twilio/README.md \ - twilio/Makefile.inc $(NULL) web/README.md web/Makefile.inc \ - $(NULL) -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu health/notifications/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu health/notifications/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc $(srcdir)/alerta/Makefile.inc $(srcdir)/awssns/Makefile.inc $(srcdir)/discord/Makefile.inc $(srcdir)/email/Makefile.inc $(srcdir)/flock/Makefile.inc $(srcdir)/irc/Makefile.inc $(srcdir)/kavenegar/Makefile.inc $(srcdir)/messagebird/Makefile.inc $(srcdir)/pagerduty/Makefile.inc $(srcdir)/pushbullet/Makefile.inc $(srcdir)/pushover/Makefile.inc $(srcdir)/rocketchat/Makefile.inc $(srcdir)/slack/Makefile.inc $(srcdir)/syslog/Makefile.inc $(srcdir)/telegram/Makefile.inc $(srcdir)/twilio/Makefile.inc $(srcdir)/web/Makefile.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print 
"d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_pluginsSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir) -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(libconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_libconfigDATA \ - install-dist_pluginsSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_libconfigDATA \ - uninstall-dist_pluginsSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_libconfigDATA \ - install-dist_pluginsSCRIPTS install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' 
\ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/health/notifications/README.md b/health/notifications/README.md index c06638ade..5b7b43406 100644 --- a/health/notifications/README.md +++ b/health/notifications/README.md @@ -58,3 +58,9 @@ export NETDATA_ALARM_NOTIFY_DEBUG=1 # send test alarms to any role /usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE" ``` +If you need to dig even deeper, you can trace the execution with `bash -x`. Note that in test mode, alarm-notify.sh calls itself with many more arguments. So first do + ```sh + bash -x /usr/libexec/netdata/plugins.d/alarm-notify.sh test + ``` + Then look in the output for the alarm-notify.sh calls and run the one you want to trace with `bash -x`. +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/alarm-notify.sh b/health/notifications/alarm-notify.sh deleted file mode 100644 index 3331dcd94..000000000 --- a/health/notifications/alarm-notify.sh +++ /dev/null @@ -1,2407 +0,0 @@ -#!/usr/bin/env bash - -# netdata -# real-time performance and health monitoring, done right! -# (C) 2017 Costa Tsaousis -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Script to send alarm notifications for netdata -# -# Features: -# - multiple notification methods -# - multiple roles per alarm -# - multiple recipients per role -# - severity filtering per recipient -# -# Supported notification methods: -# - emails by @ktsaou -# - slack.com notifications by @ktsaou -# - alerta.io notifications by @kattunga -# - discordapp.com notifications by @lowfive -# - pushover.net notifications by @ktsaou -# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070 -# - telegram.org notifications by @hashworks #1002 -# - twilio.com notifications by Levi Blaney @shadycuz #1211 -# - kafka notifications by @ktsaou #1342 -# - pagerduty.com notifications by Jim Cooley @jimcooley #1373 -# - messagebird.com notifications by @tech_no_logical #1453 -# - hipchat notifications by @ktsaou #1561 -# - fleep notifications by @Ferroin -# - custom notifications by @ktsaou -# - syslog messages by @Ferroin -# - Microsoft Team notification by @tioumen - -# ----------------------------------------------------------------------------- -# testing notifications - - -if [ \( "${1}" = "test" -o "${2}" = "test" \) -a "${#}" -le 2 ] -then - if [ "${2}" = "test" ] - then - recipient="${1}" - else - recipient="${2}" - fi - - [ -z "${recipient}" ] && recipient="sysadmin" - - id=1 - last="CLEAR" - test_res=0 - for x in "WARNING" "CRITICAL" "CLEAR" - do - echo >&2 - echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}" - - "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" - if [ $? 
-ne 0 ] - then - echo >&2 "# FAILED" - test_res=1 - else - echo >&2 "# OK" - fi - - last="${x}" - id=$((id + 1)) - done - - exit $test_res -fi - -export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin" -export LC_ALL=C - -# ----------------------------------------------------------------------------- - -PROGRAM_NAME="$(basename "${0}")" - -logdate() { - date "+%Y-%m-%d %H:%M:%S" -} - -log() { - local status="${1}" - shift - - echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}" - -} - -warning() { - log WARNING "${@}" -} - -error() { - log ERROR "${@}" -} - -info() { - log INFO "${@}" -} - -fatal() { - log FATAL "${@}" - exit 1 -} - -debug=${NETDATA_ALARM_NOTIFY_DEBUG-0} -debug() { - [ "${debug}" = "1" ] && log DEBUG "${@}" -} - -docurl() { - if [ -z "${curl}" ] - then - error "\${curl} is unset." - return 1 - fi - - if [ "${debug}" = "1" ] - then - echo >&2 "--- BEGIN curl command ---" - printf >&2 "%q " ${curl} "${@}" - echo >&2 - echo >&2 "--- END curl command ---" - - local out=$(mktemp /tmp/netdata-health-alarm-notify-XXXXXXXX) - local code=$(${curl} ${curl_options} --write-out %{http_code} --output "${out}" --silent --show-error "${@}") - local ret=$? - echo >&2 "--- BEGIN received response ---" - cat >&2 "${out}" - echo >&2 - echo >&2 "--- END received response ---" - echo >&2 "RECEIVED HTTP RESPONSE CODE: ${code}" - rm "${out}" - echo "${code}" - return ${ret} - fi - - ${curl} ${curl_options} --write-out %{http_code} --output /dev/null --silent --show-error "${@}" - return $? -} - -# ----------------------------------------------------------------------------- -# this is to be overwritten by the config file - -custom_sender() { - info "not sending custom notification for ${status} of '${host}.${chart}.${name}'" -} - - -# ----------------------------------------------------------------------------- - -# check for BASH v4+ (required for associative arrays) -[ $(( ${BASH_VERSINFO[0]} )) -lt 4 ] && \ - fatal "BASH version 4 or later is required (this is ${BASH_VERSION})." 
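Worth keeping in mind while reading this script: the health engine does not invoke it on every status flip. The delay hysteresis added to health.c earlier in this patch grows the notification delay multiplicatively while an alarm keeps changing state, and resets it once the last delay window expires. Reduced to a sketch (the struct is trimmed to the fields the algorithm touches, and the field names shorten the patch's delay_* names):

```c
#include <time.h>

struct delay {
    int    up_duration, down_duration;  /* configured base delays */
    int    max_duration;                /* hard cap */
    float  multiplier;                  /* growth while transitions keep coming */
    int    up_current, down_current, last;
    time_t up_to_timestamp;
};

static int next_notification_delay(struct delay *d, time_t now, int escalating) {
    if (now > d->up_to_timestamp) {
        /* the previous window expired: restart from the configured bases */
        d->up_current   = d->up_duration;
        d->down_current = d->down_duration;
    }
    else {
        /* still inside the previous window: back off multiplicatively, capped */
        d->up_current = (int)(d->up_current * d->multiplier);
        if (d->up_current > d->max_duration) d->up_current = d->max_duration;

        d->down_current = (int)(d->down_current * d->multiplier);
        if (d->down_current > d->max_duration) d->down_current = d->max_duration;
    }

    d->last = escalating ? d->up_current : d->down_current;
    d->up_to_timestamp = now + d->last;
    return d->last;
}
```

So a flapping alarm produces progressively delayed notifications (up to `delay_max_duration`), while an alarm that has been quiet long enough is reported with its base delay again.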
- -# ----------------------------------------------------------------------------- -# defaults to allow running this script by hand - -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" -[ -z "${NETDATA_CACHE_DIR}" ] && NETDATA_CACHE_DIR="/usr/local/var/cache/netdata" -[ -z "${NETDATA_REGISTRY_URL}" ] && NETDATA_REGISTRY_URL="https://registry.my-netdata.io" - -# ----------------------------------------------------------------------------- -# parse command line parameters - -roles="${1}" # the roles that should be notified for this event -host="${2}" # the host generated this event -unique_id="${3}" # the unique id of this event -alarm_id="${4}" # the unique id of the alarm that generated this event -event_id="${5}" # the incremental id of the event, for this alarm id -when="${6}" # the timestamp this event occurred -name="${7}" # the name of the alarm, as given in netdata health.d entries -chart="${8}" # the name of the chart (type.id) -family="${9}" # the family of the chart -status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -value="${12}" # the current value of the alarm -old_value="${13}" # the previous value of the alarm -src="${14}" # the line number and file the alarm has been configured -duration="${15}" # the duration in seconds of the previous alarm state -non_clear_duration="${16}" # the total duration in seconds this is/was non-clear -units="${17}" # the units of the value -info="${18}" # a short description of the alarm -value_string="${19}" # friendly value (with units) -old_value_string="${20}" # friendly old value (with units) - -# ----------------------------------------------------------------------------- -# find a suitable hostname to use, if netdata did not supply a hostname - -this_host=$(hostname -s 2>/dev/null) -[ -z "${host}" ] && host="${this_host}" - -# ----------------------------------------------------------------------------- -# screen statuses we don't need to send a notification - -# don't do anything if this is not WARNING, CRITICAL or CLEAR -if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ] -then - info "not sending notification for ${status} of '${host}.${chart}.${name}'" - exit 1 -fi - -# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL -if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ] -then - info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})" - exit 1 -fi - -# ----------------------------------------------------------------------------- -# load configuration - -# By default fetch images from the global public registry. -# This is required by default, since all notification methods need to download -# images via the Internet, and private registries might not be reachable. -# This can be overwritten at the configuration file. 
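The two screening tests a few lines up implement one policy: notify on WARNING and CRITICAL, and on CLEAR only when it actually ends a WARNING/CRITICAL episode; every other status (REMOVED, UNINITIALIZED, UNDEFINED) is dropped. The same policy as a compact predicate (a sketch, not code from the script; the status strings are the ones documented above for ${10} and ${11}):

```c
#include <string.h>

static int is_raised(const char *s) {
    return strcmp(s, "WARNING") == 0 || strcmp(s, "CRITICAL") == 0;
}

static int should_notify(const char *status, const char *old_status) {
    /* REMOVED, UNINITIALIZED, UNDEFINED: never notify */
    if (!is_raised(status) && strcmp(status, "CLEAR") != 0)
        return 0;

    /* a CLEAR is only news if the alarm had actually been raised */
    if (strcmp(status, "CLEAR") == 0 && !is_raised(old_status))
        return 0;

    return 1;
}
```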
-images_base_url="https://registry.my-netdata.io" - -# curl options to use -curl_options="" - -# needed commands -# if empty they will be searched in the system path -curl= -sendmail= - -# enable / disable features -SEND_SLACK="YES" -SEND_MSTEAM="YES" -SEND_ROCKETCHAT="YES" -SEND_ALERTA="YES" -SEND_FLOCK="YES" -SEND_DISCORD="YES" -SEND_PUSHOVER="YES" -SEND_TWILIO="YES" -SEND_HIPCHAT="YES" -SEND_MESSAGEBIRD="YES" -SEND_KAVENEGAR="YES" -SEND_TELEGRAM="YES" -SEND_EMAIL="YES" -SEND_PUSHBULLET="YES" -SEND_KAFKA="YES" -SEND_PD="YES" -SEND_FLEEP="YES" -SEND_IRC="YES" -SEND_AWSSNS="YES" -SEND_SYSLOG="NO" -SEND_CUSTOM="YES" - -# slack configs -SLACK_WEBHOOK_URL= -DEFAULT_RECIPIENT_SLACK= -declare -A role_recipients_slack=() - -# Microsoft Team configs -MSTEAM_WEBHOOK_URL= -DEFAULT_RECIPIENT_MSTEAM= -declare -A role_recipients_msteam=() - -# rocketchat configs -ROCKETCHAT_WEBHOOK_URL= -DEFAULT_RECIPIENT_ROCKETCHAT= -declare -A role_recipients_rocketchat=() - -# alerta configs -ALERTA_WEBHOOK_URL= -ALERTA_API_KEY= -DEFAULT_RECIPIENT_ALERTA= -declare -A role_recipients_alerta=() - -# flock configs -FLOCK_WEBHOOK_URL= -DEFAULT_RECIPIENT_FLOCK= -declare -A role_recipients_flock=() - -# discord configs -DISCORD_WEBHOOK_URL= -DEFAULT_RECIPIENT_DISCORD= -declare -A role_recipients_discord=() - -# pushover configs -PUSHOVER_APP_TOKEN= -DEFAULT_RECIPIENT_PUSHOVER= -declare -A role_recipients_pushover=() - -# pushbullet configs -PUSHBULLET_ACCESS_TOKEN= -PUSHBULLET_SOURCE_DEVICE= -DEFAULT_RECIPIENT_PUSHBULLET= -declare -A role_recipients_pushbullet=() - -# twilio configs -TWILIO_ACCOUNT_SID= -TWILIO_ACCOUNT_TOKEN= -TWILIO_NUMBER= -DEFAULT_RECIPIENT_TWILIO= -declare -A role_recipients_twilio=() - -# hipchat configs -HIPCHAT_SERVER= -HIPCHAT_AUTH_TOKEN= -DEFAULT_RECIPIENT_HIPCHAT= -declare -A role_recipients_hipchat=() - -# messagebird configs -MESSAGEBIRD_ACCESS_KEY= -MESSAGEBIRD_NUMBER= -DEFAULT_RECIPIENT_MESSAGEBIRD= -declare -A role_recipients_messagebird=() - -# kavenegar configs -KAVENEGAR_API_KEY="" -KAVENEGAR_SENDER="" -DEFAULT_RECIPIENT_KAVENEGAR="" -declare -A role_recipients_kavenegar=() - -# telegram configs -TELEGRAM_BOT_TOKEN= -DEFAULT_RECIPIENT_TELEGRAM= -declare -A role_recipients_telegram=() - -# kafka configs -KAFKA_URL= -KAFKA_SENDER_IP= - -# pagerduty.com configs -PD_SERVICE_KEY= -DEFAULT_RECIPIENT_PD= -declare -A role_recipients_pd=() - -# fleep.io configs -FLEEP_SENDER="${host}" -DEFAULT_RECIPIENT_FLEEP= -declare -A role_recipients_fleep=() - -# Amazon SNS configs -DEFAULT_RECIPIENT_AWSSNS= -AWSSNS_MESSAGE_FORMAT= -declare -A role_recipients_awssns=() - -# syslog configs -SYSLOG_FACILITY= -DEFAULT_RECIPIENT_SYSLOG= -declare -A role_recipients_syslog=() - -# custom configs -DEFAULT_RECIPIENT_CUSTOM= -declare -A role_recipients_custom=() - -# email configs -EMAIL_SENDER= -DEFAULT_RECIPIENT_EMAIL="root" -EMAIL_CHARSET=$(locale charmap 2>/dev/null) -EMAIL_THREADING= -declare -A role_recipients_email=() - -# irc configs -IRC_NICKNAME= -IRC_REALNAME= -DEFAULT_RECIPIENT_IRC= -IRC_NETWORK= -declare -A role_recipients_irc=() - -# load the stock and user configuration files -# these will overwrite the variables above - -for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf" -do - if [ -f "${CONFIG}" ] - then - debug "Loading config file '${CONFIG}'..." - source "${CONFIG}" - [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." - else - warning "Cannot find file '${CONFIG}'."
- fi -done - -# If we didn't autodetect the character set for e-mail and it wasn't -# set by the user, we need to set it to a reasonable default. UTF-8 -# should be correct for almost all modern UNIX systems. -if [ -z ${EMAIL_CHARSET} ] - then - EMAIL_CHARSET="UTF-8" -fi - -# ----------------------------------------------------------------------------- -# filter a recipient based on alarm event severity - -filter_recipient_by_criticality() { - local method="${1}" x="${2}" r s - shift - - r="${x/|*/}" # the recipient - s="${x/*|/}" # the severity required for notifying this recipient - - # no severity filtering for this person - [ "${r}" = "${s}" ] && return 0 - - # the severity is invalid - s="${s^^}" - if [ "${s}" != "CRITICAL" ] - then - error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported." - return 0 - fi - - # create the status tracking directory for this user - [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] && \ - mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" - - case "${status}" in - CRITICAL) - # make sure he will get future notifications for this alarm too - touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)" - return 0 - ;; - - WARNING) - if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] - then - # we do not remove the file, so that he will get future notifications of this alarm - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)" - return 0 - fi - ;; - - *) - if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ] - then - # remove the file, so that he will only receive notifications for CRITICAL states for this alarm - rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" - debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)" - return 0 - fi - ;; - esac - - debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification" - return 1 -} - -# ----------------------------------------------------------------------------- -# find the recipients' addresses per method - -declare -A arr_slack=() -declare -A arr_msteam=() -declare -A arr_rocketchat=() -declare -A arr_alerta=() -declare -A arr_flock=() -declare -A arr_discord=() -declare -A arr_pushover=() -declare -A arr_pushbullet=() -declare -A arr_twilio=() -declare -A arr_hipchat=() -declare -A arr_telegram=() -declare -A arr_pd=() -declare -A arr_email=() -declare -A arr_custom=() -declare -A arr_messagebird=() -declare -A arr_kavenegar=() -declare -A arr_fleep=() -declare -A arr_irc=() -declare -A arr_syslog=() -declare -A arr_awssns=() - -# netdata may call us with multiple roles, and roles may have multiple but -# overlapping recipients - so, here we find the unique recipients. 
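filter_recipient_by_criticality above is easier to follow as a state machine: per recipient and alarm, a marker file under ${NETDATA_CACHE_DIR} records "this recipient has already seen this alarm go CRITICAL", and that marker is what lets the follow-up WARNING or CLEAR through before CLEAR removes it. The same logic with the on-disk marker replaced by a boolean (a sketch only; the real script keeps the state on disk so it survives separate invocations of the notifier):

```c
#include <stdbool.h>
#include <string.h>

/* seen_critical stands in for the marker file the script keeps under
 * ${NETDATA_CACHE_DIR}/alarm-notify/<method>/<recipient>/<alarm_id> */
static bool critical_only_recipient_allows(const char *status, bool *seen_critical) {
    if (strcmp(status, "CRITICAL") == 0) {
        *seen_critical = true;   /* "touch": deliver future status changes too */
        return true;
    }

    if (strcmp(status, "WARNING") == 0)
        return *seen_critical;   /* allowed only as a follow-up to a CRITICAL */

    /* CLEAR (and anything else): allowed once, then back to CRITICAL-only */
    if (*seen_critical) {
        *seen_critical = false;  /* "rm": the episode is over */
        return true;
    }

    return false;
}
```

The role loop below then applies this filter once per (role, method, recipient) and deduplicates overlapping recipients via associative arrays keyed by the recipient name.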
-for x in ${roles//,/ } -do - # the roles 'silent' and 'disabled' mean: - # don't send a notification for this role - [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue - - # email - a="${role_recipients_email[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1" - done - - # pushover - a="${role_recipients_pushover[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1" - done - - # pushbullet - a="${role_recipients_pushbullet[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1" - done - - # twilio - a="${role_recipients_twilio[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1" - done - - # hipchat - a="${role_recipients_hipchat[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1" - done - - # messagebird - a="${role_recipients_messagebird[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1" - done - - # kavenegar - a="${role_recipients_kavenegar[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1" - done - - # telegram - a="${role_recipients_telegram[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1" - done - - # slack - a="${role_recipients_slack[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1" - done - - # Microsoft Team - a="${role_recipients_msteam[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MSTEAM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality msteam "${r}" && arr_msteam[${r/|*/}]="1" - done - - # rocketchat - a="${role_recipients_rocketchat[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ROCKETCHAT}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality rocketchat "${r}" && arr_rocketchat[${r/|*/}]="1" - done - - # alerta - a="${role_recipients_alerta[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1" - done - - # flock - a="${role_recipients_flock[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1" - done - - # discord - a="${role_recipients_discord[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && 
filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1" - done - - # pagerduty.com - a="${role_recipients_pd[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1" - done - - # fleep.io - a="${role_recipients_fleep[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLEEP}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality fleep "${r}" && arr_fleep[${r/|*/}]="1" - done - - # irc - a="${role_recipients_irc[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1" - done - - # amazon sns - a="${role_recipients_awssns[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_AWSSNS}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality awssns "${r}" && arr_awssns[${r/|*/}]="1" - done - - # syslog - a="${role_recipients_syslog[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SYSLOG}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality syslog "${r}" && arr_syslog[${r/|*/}]="1" - done - - # custom - a="${role_recipients_custom[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1" - done - -done - -# build the list of slack recipients (channels) -to_slack="${!arr_slack[*]}" -[ -z "${to_slack}" ] && SEND_SLACK="NO" - -# build the list of Microsoft team recipients (channels) -to_msteam="${!arr_msteam[*]}" -[ -z "${to_msteam}" ] && SEND_MSTEAM="NO" - -# build the list of rocketchat recipients (channels) -to_rocketchat="${!arr_rocketchat[*]}" -[ -z "${to_rocketchat}" ] && SEND_ROCKETCHAT="NO" - -# build the list of alerta recipients (channels) -to_alerta="${!arr_alerta[*]}" -[ -z "${to_alerta}" ] && SEND_ALERTA="NO" - -# build the list of flock recipients (channels) -to_flock="${!arr_flock[*]}" -[ -z "${to_flock}" ] && SEND_FLOCK="NO" - -# build the list of discord recipients (channels) -to_discord="${!arr_discord[*]}" -[ -z "${to_discord}" ] && SEND_DISCORD="NO" - -# build the list of pushover recipients (user tokens) -to_pushover="${!arr_pushover[*]}" -[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO" - -# build the list of pushbullet recipients (user tokens) -to_pushbullet="${!arr_pushbullet[*]}" -[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO" - -# build the list of twilio recipients (phone numbers) -to_twilio="${!arr_twilio[*]}" -[ -z "${to_twilio}" ] && SEND_TWILIO="NO" - -# build the list of hipchat recipients (rooms) -to_hipchat="${!arr_hipchat[*]}" -[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO" - -# build the list of messagebird recipients (phone numbers) -to_messagebird="${!arr_messagebird[*]}" -[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO" - -# build the list of kavenegar recipients (phone numbers) -to_kavenegar="${!arr_kavenegar[*]}" -[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO" - -# build the list of telegram recipients (chat ids) -to_telegram="${!arr_telegram[*]}" -[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO" - -# build the list of pagerduty recipients (service keys) -to_pd="${!arr_pd[*]}" -[ -z "${to_pd}" ] && SEND_PD="NO" - -# build the list of fleep recipients (conversation webhooks) -to_fleep="${!arr_fleep[*]}" -[ -z "${to_fleep}" ] && SEND_FLEEP="NO" - -# build the list of custom recipients
-to_custom="${!arr_custom[*]}" -[ -z "${to_custom}" ] && SEND_CUSTOM="NO" - -# build the list of email recipients (email addresses) -to_email= -for x in "${!arr_email[@]}" -do - [ ! -z "${to_email}" ] && to_email="${to_email}, " - to_email="${to_email}${x}" -done -[ -z "${to_email}" ] && SEND_EMAIL="NO" - -# build the list of irc recipients (channels) -to_irc="${!arr_irc[*]}" -[ -z "${to_irc}" ] && SEND_IRC="NO" - -# build the list of awssns recipients (facilities, servers, and prefixes) -to_awssns="${!arr_awssns[*]}" -[ -z "${to_awssns}" ] && SEND_AWSSNS="NO" - -# build the list of syslog recipients (facilities, servers, and prefixes) -to_syslog="${!arr_syslog[*]}" -[ -z "${to_syslog}" ] && SEND_SYSLOG="NO" - -# ----------------------------------------------------------------------------- -# verify the delivery methods supported - -# check slack -[ -z "${SLACK_WEBHOOK_URL}" ] && SEND_SLACK="NO" - -# check rocketchat -[ -z "${ROCKETCHAT_WEBHOOK_URL}" ] && SEND_ROCKETCHAT="NO" - -# check alerta -[ -z "${ALERTA_WEBHOOK_URL}" ] && SEND_ALERTA="NO" - -# check flock -[ -z "${FLOCK_WEBHOOK_URL}" ] && SEND_FLOCK="NO" - -# check discord -[ -z "${DISCORD_WEBHOOK_URL}" ] && SEND_DISCORD="NO" - -# check pushover -[ -z "${PUSHOVER_APP_TOKEN}" ] && SEND_PUSHOVER="NO" - -# check pushbullet -[ -z "${PUSHBULLET_ACCESS_TOKEN}" ] && SEND_PUSHBULLET="NO" - -# check twilio -[ -z "${TWILIO_ACCOUNT_TOKEN}" -o -z "${TWILIO_ACCOUNT_SID}" -o -z "${TWILIO_NUMBER}" ] && SEND_TWILIO="NO" - -# check hipchat -[ -z "${HIPCHAT_AUTH_TOKEN}" ] && SEND_HIPCHAT="NO" - -# check messagebird -[ -z "${MESSAGEBIRD_ACCESS_KEY}" -o -z "${MESSAGEBIRD_NUMBER}" ] && SEND_MESSAGEBIRD="NO" - -# check kavenegar -[ -z "${KAVENEGAR_API_KEY}" -o -z "${KAVENEGAR_SENDER}" ] && SEND_KAVENEGAR="NO" - -# check telegram -[ -z "${TELEGRAM_BOT_TOKEN}" ] && SEND_TELEGRAM="NO" - -# check kafka -[ -z "${KAFKA_URL}" -o -z "${KAFKA_SENDER_IP}" ] && SEND_KAFKA="NO" - -# check irc -[ -z "${IRC_NETWORK}" ] && SEND_IRC="NO" - -# check fleep -[ -z "${FLEEP_SERVER}" -o -z "${FLEEP_SENDER}" ] && SEND_FLEEP="NO" - -# check pagerduty.com -# if we need pd-send, check for the pd-send command -# https://www.pagerduty.com/docs/guides/agent-install-guide/ -if [ "${SEND_PD}" = "YES" ] - then - pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)" - if [ -z "${pd_send}" ] - then - error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications." - SEND_PD="NO" - fi -fi - -# if we need curl, check for the curl command -if [ \( \ - "${SEND_PUSHOVER}" = "YES" \ - -o "${SEND_SLACK}" = "YES" \ - -o "${SEND_ROCKETCHAT}" = "YES" \ - -o "${SEND_ALERTA}" = "YES" \ - -o "${SEND_FLOCK}" = "YES" \ - -o "${SEND_DISCORD}" = "YES" \ - -o "${SEND_HIPCHAT}" = "YES" \ - -o "${SEND_TWILIO}" = "YES" \ - -o "${SEND_MESSAGEBIRD}" = "YES" \ - -o "${SEND_KAVENEGAR}" = "YES" \ - -o "${SEND_TELEGRAM}" = "YES" \ - -o "${SEND_PUSHBULLET}" = "YES" \ - -o "${SEND_KAFKA}" = "YES" \ - -o "${SEND_FLEEP}" = "YES" \ - -o "${SEND_CUSTOM}" = "YES" \ - -o "${SEND_MSTEAM}" = "YES" \ - \) -a -z "${curl}" ] - then - curl="$(which curl 2>/dev/null || command -v curl 2>/dev/null)" - if [ -z "${curl}" ] - then - error "Cannot find curl command in the system path. Disabling all curl based notifications." 
- SEND_PUSHOVER="NO" - SEND_PUSHBULLET="NO" - SEND_TELEGRAM="NO" - SEND_SLACK="NO" - SEND_MSTEAM="NO" - SEND_ROCKETCHAT="NO" - SEND_ALERTA="NO" - SEND_FLOCK="NO" - SEND_DISCORD="NO" - SEND_TWILIO="NO" - SEND_HIPCHAT="NO" - SEND_MESSAGEBIRD="NO" - SEND_KAVENEGAR="NO" - SEND_KAFKA="NO" - SEND_FLEEP="NO" - SEND_CUSTOM="NO" - fi -fi - -# if we need sendmail, check for the sendmail command -if [ "${SEND_EMAIL}" = "YES" -a -z "${sendmail}" ] - then - sendmail="$(which sendmail 2>/dev/null || command -v sendmail 2>/dev/null)" - if [ -z "${sendmail}" ] - then - debug "Cannot find sendmail command in the system path. Disabling email notifications." - SEND_EMAIL="NO" - fi -fi - -# if we need logger, check for the logger command -if [ "${SEND_SYSLOG}" = "YES" -a -z "${logger}" ] - then - logger="$(which logger 2>/dev/null || command -v logger 2>/dev/null)" - if [ -z "${logger}" ] - then - debug "Cannot find logger command in the system path. Disabling syslog notifications." - SEND_SYSLOG="NO" - fi -fi - -# if we need aws, check for the aws command -if [ "${SEND_AWSSNS}" = "YES" -a -z "${aws}" ] - then - aws="$(which aws 2>/dev/null || command -v aws 2>/dev/null)" - if [ -z "${aws}" ] - then - debug "Cannot find aws command in the system path. Disabling Amazon SNS notifications." - SEND_AWSSNS="NO" - fi -fi - -# check that we have at least a method enabled -if [ "${SEND_EMAIL}" != "YES" \ - -a "${SEND_PUSHOVER}" != "YES" \ - -a "${SEND_TELEGRAM}" != "YES" \ - -a "${SEND_SLACK}" != "YES" \ - -a "${SEND_ROCKETCHAT}" != "YES" \ - -a "${SEND_ALERTA}" != "YES" \ - -a "${SEND_FLOCK}" != "YES" \ - -a "${SEND_DISCORD}" != "YES" \ - -a "${SEND_TWILIO}" != "YES" \ - -a "${SEND_HIPCHAT}" != "YES" \ - -a "${SEND_MESSAGEBIRD}" != "YES" \ - -a "${SEND_KAVENEGAR}" != "YES" \ - -a "${SEND_PUSHBULLET}" != "YES" \ - -a "${SEND_KAFKA}" != "YES" \ - -a "${SEND_PD}" != "YES" \ - -a "${SEND_FLEEP}" != "YES" \ - -a "${SEND_CUSTOM}" != "YES" \ - -a "${SEND_IRC}" != "YES" \ - -a "${SEND_AWSSNS}" != "YES" \ - -a "${SEND_SYSLOG}" != "YES" \ - -a "${SEND_MSTEAM}" != "YES" \ - ] - then - fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'." 
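
# Aside: the probes above (pd-send, curl, sendmail, logger, aws) all share one
# lookup idiom; a sketch of it as a reusable helper (require_cmd is a
# hypothetical name, not part of this script):
require_cmd() {
    local found
    found="$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)"
    [ -z "${found}" ] && return 1
    echo "${found}"
}
curl="$(require_cmd curl)" || echo "curl missing - all curl based notifications would be disabled"
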
-fi - -# ----------------------------------------------------------------------------- -# get the date the alarm happened - -date=$(date --date=@${when} "${date_format}" 2>/dev/null) -[ -z "${date}" ] && date=$(date "${date_format}" 2>/dev/null) -[ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null) -[ -z "${date}" ] && date=$(date 2>/dev/null) - -# ---------------------------------------------------------------------------- -# prepare some extra headers if we've been asked to thread e-mails -if [ "${SEND_EMAIL}" == "YES" -a "${EMAIL_THREADING}" != "NO" ] ; then - email_thread_headers="In-Reply-To: <${chart}-${name}@${host}>\nReferences: <${chart}-${name}@${host}>" -else - email_thread_headers= -fi - -# ----------------------------------------------------------------------------- -# function to URL encode a string - -urlencode() { - local string="${1}" strlen encoded pos c o - - strlen=${#string} - for (( pos=0 ; pos<strlen ; pos++ )) [... truncated in extraction: the remainder of urlencode(), the duration4human() helper used later, and the opening of the email sender; the text resumes inside send_email()'s EMAIL_SENDER parsing ...] - if [[ "${EMAIL_SENDER}" =~ ^\".*\"\ \<.*\>$ ]] - then - # the name includes double quotes - sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" - sender_name="$(echo "${EMAIL_SENDER}" | cut -d '"' -f 2)" - elif [[ "${EMAIL_SENDER}" =~ ^\'.*\'\ \<.*\>$ ]] - then - # the name includes single quotes - sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" - sender_name="$(echo "${EMAIL_SENDER}" | cut -d "'" -f 2)" - elif [[ "${EMAIL_SENDER}" =~ ^.*\ \<.*\>$ ]] - then - # the name does not have any quotes - sender_email="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 2 | cut -d '>' -f 1)" - sender_name="$(echo "${EMAIL_SENDER}" | cut -d '<' -f 1)" - fi - fi - - [ ! -z "${sender_email}" ] && opts+=(-f "${sender_email}") - [ ! -z "${sender_name}" ] && opts+=(-F "${sender_name}") - - if [ "${debug}" = "1" ] - then - echo >&2 "--- BEGIN sendmail command ---" - printf >&2 "%q " "${sendmail}" -t "${opts[@]}" - echo >&2 - echo >&2 "--- END sendmail command ---" - fi - - "${sendmail}" -t "${opts[@]}" - ret=$? - - if [ ${ret} -eq 0 ] - then - info "sent email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}'" - return 0 - else - error "failed to send email notification for: ${host} ${chart}.${name} is ${status} to '${to_email}' with error code ${ret}." - return 1 - fi - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# pushover sender - -send_pushover() { - local apptoken="${1}" usertokens="${2}" when="${3}" url="${4}" status="${5}" title="${6}" message="${7}" httpcode sent=0 user priority - - if [ "${SEND_PUSHOVER}" = "YES" -a ! -z "${apptoken}" -a ! -z "${usertokens}" -a ! -z "${title}" -a !
-z "${message}" ] - then - - # https://pushover.net/api - priority=-2 - case "${status}" in - CLEAR) priority=-1;; # low priority: no sound or vibration - WARNING) priority=0;; # normal priority: respect quiet hours - CRITICAL) priority=1;; # high priority: bypass quiet hours - *) priority=-2;; # lowest priority: no notification at all - esac - - for user in ${usertokens} - do - httpcode=$(docurl \ - --form-string "token=${apptoken}" \ - --form-string "user=${user}" \ - --form-string "html=1" \ - --form-string "title=${title}" \ - --form-string "message=${message}" \ - --form-string "timestamp=${when}" \ - --form-string "url=${url}" \ - --form-string "url_title=Open netdata dashboard to view the alarm" \ - --form-string "priority=${priority}" \ - https://api.pushover.net/1/messages.json) - - if [ "${httpcode}" = "200" ] - then - info "sent pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send pushover notification for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# pushbullet sender - -send_pushbullet() { - local userapikey="${1}" source_device="${2}" recipients="${3}" url="${4}" title="${5}" message="${6}" httpcode sent=0 user - if [ "${SEND_PUSHBULLET}" = "YES" -a ! -z "${userapikey}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] - then - #https://docs.pushbullet.com/#create-push - for user in ${recipients} - do - httpcode=$(docurl \ - --header 'Access-Token: '${userapikey}'' \ - --header 'Content-Type: application/json' \ - --data-binary @<(cat < from the message - message="${message///}" - message="${message//<\/small>/}" - - if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! -z "${message}" ] - then - # A label to be shown in addition to the sender's name - # Valid length range: 0 - 64. - sender="netdata" - - # Valid values: html, text. - # Defaults to 'html'. - msg_format="html" - - # Background color for message. Valid values: yellow, green, red, purple, gray, random. Defaults to 'yellow'. - case "${status}" in - WARNING) color="yellow" ;; - CRITICAL) color="red" ;; - CLEAR) color="green" ;; - *) color="gray" ;; - esac - - # Whether this message should trigger a user notification (change the tab color, play a sound, notify mobile phones, etc). - # Each recipient's notification preferences are taken into account. - # Defaults to false. - notify="true" - - for room in ${recipients} - do - httpcode=$(docurl -X POST \ - -H "Content-type: application/json" \ - -H "Authorization: Bearer ${authtoken}" \ - -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \ - "https://${HIPCHAT_SERVER}/v2/room/${room}/notification") - - if [ "${httpcode}" = "204" ] - then - info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'" - sent=$((sent + 1)) - else - error "failed to send HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}' with HTTP error code ${httpcode}." 
- fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - - -# ----------------------------------------------------------------------------- -# messagebird sender - -send_messagebird() { - local accesskey="${1}" messagebirdnumber="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user - if [ "${SEND_MESSAGEBIRD}" = "YES" -a ! -z "${accesskey}" -a ! -z "${messagebirdnumber}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] - then - #https://developers.messagebird.com/docs/messaging - for user in ${recipients} - do - httpcode=$(docurl -X POST \ - --data-urlencode "originator=${messagebirdnumber}" \ - --data-urlencode "recipients=${user}" \ - --data-urlencode "body=${title} ${message}" \ - --data-urlencode "datacoding=auto" \ - -H "Authorization: AccessKey ${accesskey}" \ - "https://rest.messagebird.com/messages") - - if [ "${httpcode}" = "201" ] - then - info "sent Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send Messagebird SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# kavenegar sender - -send_kavenegar() { - local API_KEY="${1}" kavenegarsender="${2}" recipients="${3}" title="${4}" message="${5}" httpcode sent=0 user - if [ "${SEND_KAVENEGAR}" = "YES" -a ! -z "${API_KEY}" -a ! -z "${kavenegarsender}" -a ! -z "${recipients}" -a ! -z "${message}" -a ! -z "${title}" ] - then - # http://api.kavenegar.com/v1/{API-KEY}/sms/send.json - for user in ${recipients} - do - httpcode=$(docurl -X POST http://api.kavenegar.com/v1/${API_KEY}/sms/send.json \ - --data-urlencode "sender=${kavenegarsender}" \ - --data-urlencode "receptor=${user}" \ - --data-urlencode "message=${title} ${message}") - - if [ "${httpcode}" = "201" ] - then - info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" - sent=$((sent + 1)) - else - error "failed to send Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# telegram sender - -send_telegram() { - local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification="" - - if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi - - case "${status}" in - WARNING) emoji="⚠️" ;; - CRITICAL) emoji="🔴" ;; - CLEAR) emoji="✅" ;; - *) emoji="⚪️" ;; - esac - - if [ "${SEND_TELEGRAM}" = "YES" -a ! -z "${bottoken}" -a ! -z "${chatids}" -a ! -z "${message}" ]; - then - for chatid in ${chatids} - do - # https://core.telegram.org/bots/api#sendmessage - httpcode=$(docurl ${disableNotification} \ - --data-urlencode "parse_mode=HTML" \ - --data-urlencode "disable_web_page_preview=true" \ - --data-urlencode "text=${emoji} ${message}" \ - "https://api.telegram.org/bot${bottoken}/sendMessage?chat_id=${chatid}") - - if [ "${httpcode}" = "200" ] - then - info "sent telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}'" - sent=$((sent + 1)) - elif [ "${httpcode}" = "401" ] - then - error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}': Wrong bot token." 
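
# Aside: every sender here follows the same contract - docurl prints only the
# HTTP status code, and the sender counts successes per recipient. A minimal
# stand-in for that wrapper (the script's real docurl, defined earlier, also
# applies ${curl_options}):
docurl() {
    curl --write-out '%{http_code}' --output /dev/null --silent "${@}"
}
httpcode=$(docurl -X POST --data-urlencode "text=hi" "https://example.com/api")
[ "${httpcode}" = "200" ] && echo "sent" || echo "failed (${httpcode})"
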
- else - error "failed to send telegram notification for: ${host} ${chart}.${name} is ${status} to '${chatid}' with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - fi - - return 1 -} - -# ----------------------------------------------------------------------------- -# Microsoft Team sender - -send_msteam() { - - local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload - - [ "${SEND_MSTEAM}" != "YES" ] && return 1 - - case "${status}" in - WARNING) icon="${MSTEAM_ICON_WARNING}" && color="${MSTEAM_COLOR_WARNING}";; - CRITICAL) icon="${MSTEAM_ICON_CRITICAL}" && color="${MSTEAM_COLOR_CRITICAL}";; - CLEAR) icon="${MSTEAM_ICON_CLEAR}" && color="${MSTEAM_COLOR_CLEAR}";; - *) icon="${MSTEAM_ICON_DEFAULT}" && color="${MSTEAM_COLOR_DEFAULT}";; - esac - - for channel in ${channels} - do - ## More details are available here regarding the payload syntax options : https://docs.microsoft.com/en-us/outlook/actionable-messages/message-card-reference - ## Online designer : https://acdesignerbeta.azurewebsites.net/ - payload="$(cat <<EOF [... truncated in extraction: the msteam MessageCard JSON and its result handling, and the opening of the slack sender through most of its attachment payload ...] ", - "ts": ${when} - } - ] - } -EOF - )" - - httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") - if [ "${httpcode}" = "200" ] - then - info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - - -# ----------------------------------------------------------------------------- -# rocketchat sender - -send_rocketchat() { - local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload - - [ "${SEND_ROCKETCHAT}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - payload="$(cat <<EOF [... truncated in extraction: the rocketchat JSON payload and result handling, and the opening of the alerta sender through most of its payload; the text resumes near the alerta payload's moreInfo field ...] View Netdata" - }, - "origin": "netdata/${this_host}", - "type": "netdataAlarm", - "rawData": "${BASH_ARGV[@]}" - } -EOF - )" - - if [[ -n "${ALERTA_API_KEY}" ]] - then - auth="Key ${ALERTA_API_KEY}" - fi - - httpcode=$(docurl -X POST "${webhook}/alert" -H "Content-Type: application/json" -H "Authorization: $auth" --data "${payload}") - - if [[ "${httpcode}" = "200" || "${httpcode}" = "201" ]] - then - info "sent alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - elif [[ "${httpcode}" = "202" ]] - then - info "suppressed alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - else - error "failed to send alerta notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}."
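
# Aside: the payload heredocs swallowed above (msteam, slack, rocketchat,
# alerta) all share one shape - capture a JSON document with interpolated
# shell variables, then hand it to docurl. A minimal sketch (channel and
# text are hypothetical values):
payload="$(cat <<EOF
{
    "channel": "#alarms",
    "text": "example ${status:-WARNING} message"
}
EOF
)"
echo "${payload}"    # would be handed to docurl --data-urlencode "payload=${payload}"
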
- fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# flock sender - -send_flock() { - local webhook="${1}" channels="${2}" httpcode sent=0 channel color payload - - [ "${SEND_FLOCK}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - httpcode=$(docurl -X POST "${webhook}" -H "Content-Type: application/json" -d "{ - \"sendAs\": { - \"name\" : \"netdata on ${host}\", - \"profileImage\" : \"${images_base_url}/images/banner-icon-144x144.png\" - }, - \"text\": \"${host} *${status_message}*\", - \"timestamp\": \"${when}\", - \"attachments\": [ - { - \"description\": \"${chart} (${family}) - ${info}\", - \"color\": \"${color}\", - \"title\": \"${alarm}\", - \"url\": \"${goto_url}\", - \"text\": \"${info}\", - \"views\": { - \"image\": { - \"original\": { \"src\": \"${image}\", \"width\": 400, \"height\": 400 }, - \"thumbnail\": { \"src\": \"${image}\", \"width\": 50, \"height\": 50 }, - \"filename\": \"${image}\" - } - } - } - ] - }" ) - if [ "${httpcode}" = "200" ] - then - info "sent flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" - sent=$((sent + 1)) - else - error "failed to send flock notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# discord sender - -send_discord() { - local webhook="${1}/slack" channels="${2}" httpcode sent=0 channel color payload username - - [ "${SEND_DISCORD}" != "YES" ] && return 1 - - case "${status}" in - WARNING) color="warning" ;; - CRITICAL) color="danger" ;; - CLEAR) color="good" ;; - *) color="#777777" ;; - esac - - for channel in ${channels} - do - username="netdata on ${host}" - [ ${#username} -gt 32 ] && username="${username:0:29}..." - - payload="$(cat <<EOF [... truncated in extraction: the discord JSON payload and result handling, the fleep.io and irc senders, and the opening of the Amazon SNS sender; the text resumes at the SNS publish result check ...] &>/dev/null - if [ $?
= 0 ]; then - info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" - sent=$((sent + 1)) - else - error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" - fi - done - - [ ${sent} -gt 0 ] && return 0 - - return 1 -} - -# ----------------------------------------------------------------------------- -# syslog sender - -send_syslog() { - local facility=${SYSLOG_FACILITY:-"local6"} level='info' targets="${1}" - local priority='' message='' host='' port='' prefix='' - local temp1='' temp2='' - - [ "${SEND_SYSLOG}" = "YES" ] || return 1 - - if [ "${status}" = "CRITICAL" ] ; then - level='crit' - elif [ "${status}" = "WARNING" ] ; then - level='warning' - fi - - for target in ${targets} ; do - priority="${facility}.${level}" - message='' - host='' - port='' - prefix='' - temp1='' - temp2='' - - prefix=$(echo ${target} | cut -d '/' -f 2) - temp1=$(echo ${target} | cut -d '/' -f 1) - - if [ ${prefix} != ${temp1} ] ; then - if (echo ${temp1} | grep -q '@' ) ; then - temp2=$(echo ${temp1} | cut -d '@' -f 1) - host=$(echo ${temp1} | cut -d '@' -f 2) - - if [ ${temp2} != ${host} ] ; then - priority=${temp2} - fi - - port=$(echo ${host} | rev | cut -d ':' -f 1 | rev) - - if ( echo ${host} | grep -E -q '\[.*\]' ) ; then - if ( echo ${port} | grep -q ']' ) ; then - port='' - else - host=$(echo ${host} | rev | cut -d ':' -f 2- | rev) - fi - else - if [ ${port} = ${host} ] ; then - port='' - else - host=$(echo ${host} | cut -d ':' -f 1) - fi - fi - else - priority=${temp1} - fi - fi - - message="${prefix} ${status} on ${host} at ${date}: ${chart} ${value_string}" - - if [ ${host} ] ; then - logger_options="${logger_options} -n ${host}" - if [ ${port} ] ; then - logger_options="${logger_options} -P ${port}" - fi - fi - - ${logger} -p ${priority} ${logger_options} "${message}" - done - - return $? 
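
# Aside: a worked example of the target syntax the loop above parses - my
# reading is [facility.level@]host[:port]/prefix, with IPv6 hosts in
# [brackets]; this simplified sketch skips the IPv6 branch:
target="local0.crit@loghost:514/netdata"
prefix="${target#*/}"          # -> netdata
temp1="${target%%/*}"          # -> local0.crit@loghost:514
priority="${temp1%%@*}"        # -> local0.crit
hostpart="${temp1#*@}"         # -> loghost:514
port="${hostpart##*:}"         # -> 514
host="${hostpart%%:*}"         # -> loghost
echo logger -p "${priority}" -n "${host}" -P "${port}" "netdata CRITICAL on ${host} ..."
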
-} - - -# ----------------------------------------------------------------------------- -# prepare the content of the notification - -# the url to send the user on click -urlencode "${host}" >/dev/null; url_host="${REPLY}" -urlencode "${chart}" >/dev/null; url_chart="${REPLY}" -urlencode "${family}" >/dev/null; url_family="${REPLY}" -urlencode "${name}" >/dev/null; url_name="${REPLY}" -goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}" - -# the severity of the alarm -severity="${status}" - -# the time the alarm was raised -duration4human ${duration} >/dev/null; duration_txt="${REPLY}" -duration4human ${non_clear_duration} >/dev/null; non_clear_duration_txt="${REPLY}" -raised_for="(was ${old_status,,} for ${duration_txt})" - -# the key status message -status_message="status unknown" - -# the color of the alarm -color="grey" - -# the alarm value -alarm="${name//_/ } = ${value_string}" - -# the image of the alarm -image="${images_base_url}/images/banner-icon-144x144.png" - -# prepare the title based on status -case "${status}" in - CRITICAL) - image="${images_base_url}/images/alert-128-red.png" - status_message="is critical" - color="#ca414b" - ;; - - WARNING) - image="${images_base_url}/images/alert-128-orange.png" - status_message="needs attention" - color="#ffc107" - ;; - - CLEAR) - image="${images_base_url}/images/check-mark-2-128-green.png" - status_message="recovered" - color="#77ca6d" - ;; -esac - -if [ "${status}" = "CLEAR" ] -then - severity="Recovered from ${old_status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm was raised for ${non_clear_duration_txt})" - fi - - # don't show the value when the status is CLEAR - # for certain alarms, this value might not have any meaning - alarm="${name//_/ } ${raised_for}" - -elif [ "${old_status}" = "WARNING" -a "${status}" = "CRITICAL" ] -then - severity="Escalated to ${status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm is raised for ${non_clear_duration_txt})" - fi - -elif [ "${old_status}" = "CRITICAL" -a "${status}" = "WARNING" ] -then - severity="Demoted to ${status}" - if [ ${non_clear_duration} -gt ${duration} ] - then - raised_for="(alarm is raised for ${non_clear_duration_txt})" - fi - -else - raised_for= -fi - -# prepare HTML versions of elements -info_html= -[ ! -z "${info}" ] && info_html="
<small><br/>${info}</small>" - -raised_for_html= -[ ! -z "${raised_for}" ] && raised_for_html="<br/><small>${raised_for}</small>" - -# ----------------------------------------------------------------------------- -# send the slack notification - -# slack aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_slack "${SLACK_WEBHOOK_URL}" "${to_slack}" -SENT_SLACK=$? - -# ----------------------------------------------------------------------------- -# send the Microsoft notification - -# Microsoft team aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_msteam "${MSTEAM_WEBHOOK_URL}" "${to_msteam}" -SENT_MSTEAM=$? - -# ----------------------------------------------------------------------------- -# send the rocketchat notification - -# rocketchat aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_rocketchat "${ROCKETCHAT_WEBHOOK_URL}" "${to_rocketchat}" -SENT_ROCKETCHAT=$? - -# ----------------------------------------------------------------------------- -# send the alerta notification - -# alerta aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_alerta "${ALERTA_WEBHOOK_URL}" "${to_alerta}" -SENT_ALERTA=$? - -# ----------------------------------------------------------------------------- -# send the flock notification - -# flock aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_flock "${FLOCK_WEBHOOK_URL}" "${to_flock}" -SENT_FLOCK=$? - -# ----------------------------------------------------------------------------- -# send the discord notification - -# discord aggregates posts from the same username -# so we use "${host} ${status}" as the bot username, to make them diff - -send_discord "${DISCORD_WEBHOOK_URL}" "${to_discord}" -SENT_DISCORD=$? - -# ----------------------------------------------------------------------------- -# send the pushover notification - -send_pushover "${PUSHOVER_APP_TOKEN}" "${to_pushover}" "${when}" "${goto_url}" "${status}" "${host} ${status_message} - ${name//_/ } - ${chart}" " -${alarm}${info_html} [... HTML table body stripped in extraction; rows: ${chart} / Chart, ${family} / Family, ${severity} / Severity, ${date}${raised_for_html} / Time, and a "View Netdata" link ...]
  -The source of this alarm is line ${src} -" - -SENT_PUSHOVER=$? - -# ----------------------------------------------------------------------------- -# send the pushbullet notification - -send_pushbullet "${PUSHBULLET_ACCESS_TOKEN}" "${PUSHBULLET_SOURCE_DEVICE}" "${to_pushbullet}" "${goto_url}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm}\n -Severity: ${severity}\n -Chart: ${chart}\n -Family: ${family}\n -$(date -d @${when})\n -The source of this alarm is line ${src}" - -SENT_PUSHBULLET=$? - -# ----------------------------------------------------------------------------- -# send the twilio SMS - -send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_TWILIO=$? - -# ----------------------------------------------------------------------------- -# send the messagebird SMS - -send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_MESSAGEBIRD=$? - - -# ----------------------------------------------------------------------------- -# send the kavenegar SMS - -send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_KAVENEGAR=$? - - -# ----------------------------------------------------------------------------- -# send the telegram.org message - -# https://core.telegram.org/bots/api#formatting-options -send_telegram "${TELEGRAM_BOT_TOKEN}" "${to_telegram}" "${host} ${status_message} - ${name//_/ } -${chart} (${family}) -${alarm} -${info}" - -SENT_TELEGRAM=$? - - -# ----------------------------------------------------------------------------- -# send the kafka message - -send_kafka -SENT_KAFKA=$? - - -# ----------------------------------------------------------------------------- -# send the pagerduty.com message - -send_pd "${to_pd}" -SENT_PD=$? - -# ----------------------------------------------------------------------------- -# send the fleep message - -send_fleep "${to_fleep}" -SENT_FLEEP=$? - -# ----------------------------------------------------------------------------- -# send the irc message - -send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} -Severity: ${severity} -Chart: ${chart} -Family: ${family} -${info}" - -SENT_IRC=$? - -# ----------------------------------------------------------------------------- -# send the custom message - -send_custom() { - # is it enabled? - [ "${SEND_CUSTOM}" != "YES" ] && return 1 - - # do we have any sender? - [ -z "${1}" ] && return 1 - - # call the custom_sender function - custom_sender "${@}" -} - -send_custom "${to_custom}" -SENT_CUSTOM=$? - - -# ----------------------------------------------------------------------------- -# send hipchat message - -send_hipchat "${HIPCHAT_AUTH_TOKEN}" "${to_hipchat}" " \ -${host} ${status_message}
\ -${alarm} ${info_html}
\ -${chart} (family ${family})
\ -${date}${raised_for_html}
\ -View netdata dashboard \ -(source of alarm ${src}) \ -" - -SENT_HIPCHAT=$? - - -# ----------------------------------------------------------------------------- -# send the Amazon SNS message - -send_awssns ${to_awssns} - -SENT_AWSSNS=$? - - -# ----------------------------------------------------------------------------- -# send the syslog message - -send_syslog ${to_syslog} - -SENT_SYSLOG=$? - - -# ----------------------------------------------------------------------------- -# send the email - -send_email <<EOF [... multipart MIME headers and HTML e-mail template stripped in extraction; the body renders a "netdata notification" banner, "${host} ${status_message}", a table with rows: ${chart} / Chart, ${alarm}${info_html} / Alarm, ${family} / Family, ${severity} / Severity, ${date} ${raised_for_html} / Time, a "View Netdata" link, "The source of this alarm is line ${src}", "(alarms are configurable, edit this file to adapt the alarm to your needs)", and "Sent by netdata, the real-time performance and health monitoring, on ${this_host}." ...]
- - ---multipart-boundary-- -EOF - -SENT_EMAIL=$? - -# ----------------------------------------------------------------------------- -# let netdata know - -if [ ${SENT_EMAIL} -eq 0 \ - -o ${SENT_PUSHOVER} -eq 0 \ - -o ${SENT_TELEGRAM} -eq 0 \ - -o ${SENT_SLACK} -eq 0 \ - -o ${SENT_MSTEAM} -eq 0 \ - -o ${SENT_ROCKETCHAT} -eq 0 \ - -o ${SENT_ALERTA} -eq 0 \ - -o ${SENT_FLOCK} -eq 0 \ - -o ${SENT_DISCORD} -eq 0 \ - -o ${SENT_TWILIO} -eq 0 \ - -o ${SENT_HIPCHAT} -eq 0 \ - -o ${SENT_MESSAGEBIRD} -eq 0 \ - -o ${SENT_KAVENEGAR} -eq 0 \ - -o ${SENT_PUSHBULLET} -eq 0 \ - -o ${SENT_KAFKA} -eq 0 \ - -o ${SENT_PD} -eq 0 \ - -o ${SENT_FLEEP} -eq 0 \ - -o ${SENT_IRC} -eq 0 \ - -o ${SENT_AWSSNS} -eq 0 \ - -o ${SENT_CUSTOM} -eq 0 \ - -o ${SENT_SYSLOG} -eq 0 \ - ] - then - # we did send something - exit 0 -fi - -# we did not send anything -exit 1 diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in index ea8223097..dd3cda917 100755 --- a/health/notifications/alarm-notify.sh.in +++ b/health/notifications/alarm-notify.sh.in @@ -27,6 +27,7 @@ # - messagebird.com notifications by @tech_no_logical #1453 # - hipchat notifications by @ktsaou #1561 # - fleep notifications by @Ferroin +# - prowlapp.com notifications by @Ferroin # - custom notifications by @ktsaou # - syslog messages by @Ferroin # - Microsoft Team notification by @tioumen @@ -54,7 +55,7 @@ then echo >&2 echo >&2 "# SENDING TEST ${x} ALARM TO ROLE: ${recipient}" - "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" + "${0}" "${recipient}" "$(hostname)" 1 1 "${id}" "$(date +%s)" "test_alarm" "test.chart" "test.family" "${x}" "${last}" 100 90 "${0}" 1 $((0 + id)) "units" "this is a test alarm to verify notifications work" "new value" "old value" "evaluated expression" "expression variable values" 0 0 if [ $? -ne 0 ] then echo >&2 "# FAILED" @@ -142,6 +143,31 @@ docurl() { return $? } +# ----------------------------------------------------------------------------- +# List of all the notification mechanisms we support. +# Used in a couple of places to write more compact code. 
+ +method_names=" +email +pushover +pushbullet +telegram +slack +alerta +flock +discord +hipchat +twilio +messagebird +pd +fleep +syslog +custom +msteam +kavenegar +prowl +" + # ----------------------------------------------------------------------------- # this is to be overwritten by the config file @@ -167,32 +193,51 @@ custom_sender() { # ----------------------------------------------------------------------------- # parse command line parameters -roles="${1}" # the roles that should be notified for this event -host="${2}" # the host generated this event -unique_id="${3}" # the unique id of this event -alarm_id="${4}" # the unique id of the alarm that generated this event -event_id="${5}" # the incremental id of the event, for this alarm id -when="${6}" # the timestamp this event occurred -name="${7}" # the name of the alarm, as given in netdata health.d entries -chart="${8}" # the name of the chart (type.id) -family="${9}" # the family of the chart -status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL -value="${12}" # the current value of the alarm -old_value="${13}" # the previous value of the alarm -src="${14}" # the line number and file the alarm has been configured -duration="${15}" # the duration in seconds of the previous alarm state -non_clear_duration="${16}" # the total duration in seconds this is/was non-clear -units="${17}" # the units of the value -info="${18}" # a short description of the alarm -value_string="${19}" # friendly value (with units) -old_value_string="${20}" # friendly old value (with units) +if [ ${1} = "unittest" ] ; then + unittest=1 # enable unit testing mode + roles="${2}" # the role that should be used for unit testing + cfgfile="${3}" # the location of the config file to use for unit testing + status="${4}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL + old_status="${5}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL +else + roles="${1}" # the roles that should be notified for this event + args_host="${2}" # the host generated this event + unique_id="${3}" # the unique id of this event + alarm_id="${4}" # the unique id of the alarm that generated this event + event_id="${5}" # the incremental id of the event, for this alarm id + when="${6}" # the timestamp this event occurred + name="${7}" # the name of the alarm, as given in netdata health.d entries + chart="${8}" # the name of the chart (type.id) + family="${9}" # the family of the chart + status="${10}" # the current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL + old_status="${11}" # the previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL + value="${12}" # the current value of the alarm + old_value="${13}" # the previous value of the alarm + src="${14}" # the line number and file the alarm has been configured + duration="${15}" # the duration in seconds of the previous alarm state + non_clear_duration="${16}" # the total duration in seconds this is/was non-clear + units="${17}" # the units of the value + info="${18}" # a short description of the alarm + value_string="${19}" # friendly value (with units) + old_value_string="${20}" # friendly old value (with units) + calc_expression="${21}" # contains the expression that was evaluated to trigger the alarm + calc_param_values="${22}" # the values of the parameters in the expression, at 
the time of the evaluation + total_warnings="${23}" # Total number of alarms in WARNING state + total_critical="${24}" # Total number of alarms in CRITICAL state +fi + # ----------------------------------------------------------------------------- # find a suitable hostname to use, if netdata did not supply a hostname -this_host=$(hostname -s 2>/dev/null) -[ -z "${host}" ] && host="${this_host}" +if [ -z ${args_host} ] + then + this_host=$(hostname -s 2>/dev/null) + host="${this_host}" + args_host="${this_host}" +else + host="${args_host}" +fi # ----------------------------------------------------------------------------- # screen statuses we don't need to send a notification @@ -205,7 +250,7 @@ then fi # don't do anything if this is CLEAR, but it was not WARNING or CRITICAL -if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ] +if [ "${clear_alarm_always}" != "YES" -a "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ] then info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})" exit 1 @@ -223,118 +268,80 @@ images_base_url="https://registry.my-netdata.io" # curl options to use curl_options="" +# hostname handling +use_fqdn="NO" + # needed commands # if empty they will be searched in the system path curl= sendmail= # enable / disable features -SEND_SLACK="YES" -SEND_MSTEAM="YES" -SEND_ALERTA="YES" -SEND_FLOCK="YES" -SEND_DISCORD="YES" -SEND_PUSHOVER="YES" -SEND_TWILIO="YES" -SEND_HIPCHAT="YES" -SEND_MESSAGEBIRD="YES" -SEND_KAVENEGAR="YES" -SEND_TELEGRAM="YES" -SEND_EMAIL="YES" -SEND_PUSHBULLET="YES" -SEND_KAFKA="YES" -SEND_PD="YES" -SEND_FLEEP="YES" -SEND_IRC="YES" -SEND_AWSSNS="YES" -SEND_SYSLOG="NO" -SEND_CUSTOM="YES" +for method_name in ${method_names^^} ; do + declare SEND_${method_name}="YES" + declare DEFAULT_RECIPIENT_${method_name} +done + +for method_name in ${method_names} ; do + declare -A role_recipients_${method_name} +done # slack configs SLACK_WEBHOOK_URL= -DEFAULT_RECIPIENT_SLACK= -declare -A role_recipients_slack=() # Microsoft Team configs MSTEAM_WEBHOOK_URL= -DEFAULT_RECIPIENT_MSTEAM= -declare -A role_recipients_msteam=() # rocketchat configs ROCKETCHAT_WEBHOOK_URL= -DEFAULT_RECIPIENT_ROCKETCHAT= -declare -A role_recipients_rocketchat=() # alerta configs ALERTA_WEBHOOK_URL= ALERTA_API_KEY= -DEFAULT_RECIPIENT_ALERTA= -declare -A role_recipients_alerta=() # flock configs FLOCK_WEBHOOK_URL= -DEFAULT_RECIPIENT_FLOCK= -declare -A role_recipients_flock=() # discord configs DISCORD_WEBHOOK_URL= -DEFAULT_RECIPIENT_DISCORD= -declare -A role_recipients_discord=() # pushover configs PUSHOVER_APP_TOKEN= -DEFAULT_RECIPIENT_PUSHOVER= -declare -A role_recipients_pushover=() # pushbullet configs PUSHBULLET_ACCESS_TOKEN= PUSHBULLET_SOURCE_DEVICE= -DEFAULT_RECIPIENT_PUSHBULLET= -declare -A role_recipients_pushbullet=() # twilio configs TWILIO_ACCOUNT_SID= TWILIO_ACCOUNT_TOKEN= TWILIO_NUMBER= -DEFAULT_RECIPIENT_TWILIO= -declare -A role_recipients_twilio=() # hipchat configs HIPCHAT_SERVER= HIPCHAT_AUTH_TOKEN= -DEFAULT_RECIPIENT_HIPCHAT= -declare -A role_recipients_hipchat=() # messagebird configs MESSAGEBIRD_ACCESS_KEY= MESSAGEBIRD_NUMBER= -DEFAULT_RECIPIENT_MESSAGEBIRD= -declare -A role_recipients_messagebird=() # kavenegar configs -KAVENEGAR_API_KEY="" -KAVENEGAR_SENDER="" -DEFAULT_RECIPIENT_KAVENEGAR=() -declare -A role_recipients_kavenegar="" +KAVENEGAR_API_KEY= +KAVENEGAR_SENDER= # telegram configs TELEGRAM_BOT_TOKEN= -DEFAULT_RECIPIENT_TELEGRAM= -declare 
-A role_recipients_telegram=() # kafka configs +SEND_KAFKA="YES" KAFKA_URL= KAFKA_SENDER_IP= # pagerduty.com configs PD_SERVICE_KEY= -DEFAULT_RECIPIENT_PD= -declare -A role_recipients_pd=() # fleep.io configs FLEEP_SENDER="${host}" -DEFAULT_RECIPIENT_FLEEP= -declare -A role_recipients_fleep=() # Amazon SNS configs DEFAULT_RECIPIENT_AWSSNS= @@ -343,40 +350,38 @@ declare -A role_recipients_awssns=() # syslog configs SYSLOG_FACILITY= -declare -A role_recipients_syslog=() - -# custom configs -DEFAULT_RECIPIENT_CUSTOM= -declare -A role_recipients_custom=() # email configs EMAIL_SENDER= -DEFAULT_RECIPIENT_EMAIL="root" EMAIL_CHARSET=$(locale charmap 2>/dev/null) EMAIL_THREADING= -declare -A role_recipients_email=() +DEFAULT_RECIPIENT_EMAIL="root" # irc configs IRC_NICKNAME= IRC_REALNAME= -DEFAULT_RECIPIENT_IRC= IRC_NETWORK= -declare -A role_recipients_irc=() # load the stock and user configuration files # these will overwrite the variables above -for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf" -do - if [ -f "${CONFIG}" ] - then - debug "Loading config file '${CONFIG}'..." - source "${CONFIG}" - [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." - else - warning "Cannot find file '${CONFIG}'." - fi -done +if [ ${unittest} ] ; + then + source "${cfgfile}" + [ $? -ne 0 ] && error "Failed to load requested config file." && exit 1 +else + for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/health_alarm_notify.conf" "${NETDATA_USER_CONFIG_DIR}/health_alarm_notify.conf" + do + if [ -f "${CONFIG}" ] + then + debug "Loading config file '${CONFIG}'..." + source "${CONFIG}" + [ $? -ne 0 ] && error "Failed to load config file '${CONFIG}'." + else + warning "Cannot find file '${CONFIG}'." + fi + done +fi # If we didn't autodetect the character set for e-mail and it wasn't # set by the user, we need to set it to a reasonable default. UTF-8 @@ -386,6 +391,14 @@ if [ -z ${EMAIL_CHARSET} ] EMAIL_CHARSET="UTF-8" fi +# If we've been asked to use FQDN's for the URL's in the alarm, do so, +# unless we're sending an alarm for a slave system which we can't get the +# FQDN of easily. +if [ "${use_fqdn}" = "YES" -a "${host}" = "$(hostname -s 2>/dev/null)" ] + then + host="$(hostname -f 2>/dev/null)" +fi + # ----------------------------------------------------------------------------- # filter a recipient based on alarm event severity @@ -443,285 +456,6 @@ filter_recipient_by_criticality() { return 1 } -# ----------------------------------------------------------------------------- -# find the recipients' addresses per method - -declare -A arr_slack=() -declare -A arr_msteam=() -declare -A arr_rocketchat=() -declare -A arr_alerta=() -declare -A arr_flock=() -declare -A arr_discord=() -declare -A arr_pushover=() -declare -A arr_pushbullet=() -declare -A arr_twilio=() -declare -A arr_hipchat=() -declare -A arr_telegram=() -declare -A arr_pd=() -declare -A arr_email=() -declare -A arr_custom=() -declare -A arr_messagebird=() -declare -A arr_kavenegar=() -declare -A arr_fleep=() -declare -A arr_irc=() -declare -A arr_syslog=() -declare -A arr_awssns=() - -# netdata may call us with multiple roles, and roles may have multiple but -# overlapping recipients - so, here we find the unique recipients. 
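
# Aside: the replacement code above drives everything from ${method_names}
# using dynamic declares plus indirect expansion; the mechanism in miniature
# (hypothetical two-method list):
method_names="email slack"
for m in ${method_names^^} ; do declare "SEND_${m}=YES" ; done        # ${var^^} upper-cases
for m in ${method_names}   ; do declare -A "role_recipients_${m}" ; done
send_var="SEND_SLACK"
echo "${!send_var}"    # -> YES (indirect expansion reads it back)
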
-for x in ${roles//,/ } -do - # the roles 'silent' and 'disabled' mean: - # don't send a notification for this role - [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue - - # email - a="${role_recipients_email[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_EMAIL}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality email "${r}" && arr_email[${r/|*/}]="1" - done - - # pushover - a="${role_recipients_pushover[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHOVER}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushover "${r}" && arr_pushover[${r/|*/}]="1" - done - - # pushbullet - a="${role_recipients_pushbullet[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PUSHBULLET}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pushbullet "${r}" && arr_pushbullet[${r/|*/}]="1" - done - - # twilio - a="${role_recipients_twilio[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TWILIO}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality twilio "${r}" && arr_twilio[${r/|*/}]="1" - done - - # hipchat - a="${role_recipients_hipchat[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_HIPCHAT}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality hipchat "${r}" && arr_hipchat[${r/|*/}]="1" - done - - # messagebird - a="${role_recipients_messagebird[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MESSAGEBIRD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality messagebird "${r}" && arr_messagebird[${r/|*/}]="1" - done - - # kavenegar - a="${role_recipients_kavenegar[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_KAVENEGAR}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality kavenegar "${r}" && arr_kavenegar[${r/|*/}]="1" - done - - # telegram - a="${role_recipients_telegram[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_TELEGRAM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality telegram "${r}" && arr_telegram[${r/|*/}]="1" - done - - # slack - a="${role_recipients_slack[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SLACK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality slack "${r}" && arr_slack[${r/|*/}]="1" - done - - # Microsoft Team - a="${role_recipients_msteam[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_MSTEAM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality msteam "${r}" && arr_msteam[${r/|*/}]="1" - done - - # rocketchat - a="${role_recipients_rocketchat[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ROCKETCHAT}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality rocketchat "${r}" && arr_rocketchat[${r/|*/}]="1" - done - - # alerta - a="${role_recipients_alerta[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_ALERTA}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality alerta "${r}" && arr_alerta[${r/|*/}]="1" - done - - # flock - a="${role_recipients_flock[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLOCK}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality flock "${r}" && arr_flock[${r/|*/}]="1" - done - - # discord - a="${role_recipients_discord[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_DISCORD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && 
filter_recipient_by_criticality discord "${r}" && arr_discord[${r/|*/}]="1" - done - - # pagerduty.com - a="${role_recipients_pd[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_PD}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality pd "${r}" && arr_pd[${r/|*/}]="1" - done - - # fleep.io - a="${role_recipients_fleep[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_FLEEP}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality fleep "${r}" && arr_fleep[${r/|*/}]="1" - done - - # irc - a="${role_recipients_irc[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_IRC}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality irc "${r}" && arr_irc[${r/|*/}]="1" - done - - # amazon sns - a="${role_recipients_awssns[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_AWSSNS}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality awssns "${r}" && arr_awssns[${r/|*/}]="1" - done - - # syslog - a="${role_recipients_syslog[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_SYSLOG}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality syslog "${r}" && arr_syslog[${r/|*/}]="1" - done - - # custom - a="${role_recipients_custom[${x}]}" - [ -z "${a}" ] && a="${DEFAULT_RECIPIENT_CUSTOM}" - for r in ${a//,/ } - do - [ "${r}" != "disabled" ] && filter_recipient_by_criticality custom "${r}" && arr_custom[${r/|*/}]="1" - done - -done - -# build the list of slack recipients (channels) -to_slack="${!arr_slack[*]}" -[ -z "${to_slack}" ] && SEND_SLACK="NO" - -# build the list of Microsoft team recipients (channels) -to_msteam="${!arr_msteam[*]}" -[ -z "${to_msteam}" ] && SEND_MSTEAM="NO" - -# build the list of rocketchat recipients (channels) -to_rocketchat="${!arr_rocketchat[*]}" -[ -z "${to_rocketchat}" ] && SEND_ROCKETCHAT="NO" - -# build the list of alerta recipients (channels) -to_alerta="${!arr_alerta[*]}" -[ -z "${to_alerta}" ] && SEND_ALERTA="NO" - -# build the list of flock recipients (channels) -to_flock="${!arr_flock[*]}" -[ -z "${to_flock}" ] && SEND_FLOCK="NO" - -# build the list of discord recipients (channels) -to_discord="${!arr_discord[*]}" -[ -z "${to_discord}" ] && SEND_DISCORD="NO" - -# build the list of pushover recipients (user tokens) -to_pushover="${!arr_pushover[*]}" -[ -z "${to_pushover}" ] && SEND_PUSHOVER="NO" - -# build the list of pushbulet recipients (user tokens) -to_pushbullet="${!arr_pushbullet[*]}" -[ -z "${to_pushbullet}" ] && SEND_PUSHBULLET="NO" - -# build the list of twilio recipients (phone numbers) -to_twilio="${!arr_twilio[*]}" -[ -z "${to_twilio}" ] && SEND_TWILIO="NO" - -# build the list of hipchat recipients (rooms) -to_hipchat="${!arr_hipchat[*]}" -[ -z "${to_hipchat}" ] && SEND_HIPCHAT="NO" - -# build the list of messagebird recipients (phone numbers) -to_messagebird="${!arr_messagebird[*]}" -[ -z "${to_messagebird}" ] && SEND_MESSAGEBIRD="NO" - -# build the list of kavenegar recipients (phone numbers) -to_kavenegar="${!arr_kavenegar[*]}" -[ -z "${to_kavenegar}" ] && SEND_KAVENEGAR="NO" - -# check array of telegram recipients (chat ids) -to_telegram="${!arr_telegram[*]}" -[ -z "${to_telegram}" ] && SEND_TELEGRAM="NO" - -# build the list of pagerduty recipients (service keys) -to_pd="${!arr_pd[*]}" -[ -z "${to_pd}" ] && SEND_PD="NO" - -# build the list of fleep recipients (conversation webhooks) -to_fleep="${!arr_fleep[*]}" -[ -z "${to_fleep}" ] && SEND_FLEEP="NO" - -# build the list of custom recipients 
-to_custom="${!arr_custom[*]}" -[ -z "${to_custom}" ] && SEND_CUSTOM="NO" - -# build the list of email recipients (email addresses) -to_email= -for x in "${!arr_email[@]}" -do - [ ! -z "${to_email}" ] && to_email="${to_email}, " - to_email="${to_email}${x}" -done -[ -z "${to_email}" ] && SEND_EMAIL="NO" - -# build the list of irc recipients (channels) -to_irc="${!arr_irc[*]}" -[ -z "${to_irc}" ] && SEND_IRC="NO" - -# build the list of awssns recipients (facilities, servers, and prefixes) -to_awssns="${!arr_awssns[*]}" -[ -z "${to_awssns}" ] && SEND_AWSSNS="NO" - -# build the list of syslog recipients (facilities, servers, and prefixes) -to_syslog="${!arr_syslog[*]}" -[ -z "${to_syslog}" ] && SEND_SYSLOG="NO" - # ----------------------------------------------------------------------------- # verify the delivery methods supported @@ -770,25 +504,13 @@ to_syslog="${!arr_syslog[*]}" # check fleep [ -z "${FLEEP_SERVER}" -o -z "${FLEEP_SENDER}" ] && SEND_FLEEP="NO" -# check pagerduty.com -# if we need pd-send, check for the pd-send command -# https://www.pagerduty.com/docs/guides/agent-install-guide/ -if [ "${SEND_PD}" = "YES" ] - then - pd_send="$(which pd-send 2>/dev/null || command -v pd-send 2>/dev/null)" - if [ -z "${pd_send}" ] - then - error "Cannot find pd-send command in the system path. Disabling pagerduty.com notifications." - SEND_PD="NO" - fi -fi - # if we need curl, check for the curl command if [ \( \ "${SEND_PUSHOVER}" = "YES" \ -o "${SEND_SLACK}" = "YES" \ -o "${SEND_ROCKETCHAT}" = "YES" \ -o "${SEND_ALERTA}" = "YES" \ + -o "${SEND_PD}" = "YES" \ -o "${SEND_FLOCK}" = "YES" \ -o "${SEND_DISCORD}" = "YES" \ -o "${SEND_HIPCHAT}" = "YES" \ @@ -799,6 +521,7 @@ if [ \( \ -o "${SEND_PUSHBULLET}" = "YES" \ -o "${SEND_KAFKA}" = "YES" \ -o "${SEND_FLEEP}" = "YES" \ + -o "${SEND_PROWL}" = "YES" \ -o "${SEND_CUSTOM}" = "YES" \ -o "${SEND_MSTEAM}" = "YES" \ \) -a -z "${curl}" ] @@ -814,6 +537,7 @@ if [ \( \ SEND_MSTEAM="NO" SEND_ROCKETCHAT="NO" SEND_ALERTA="NO" + SEND_PD="NO" SEND_FLOCK="NO" SEND_DISCORD="NO" SEND_TWILIO="NO" @@ -822,6 +546,7 @@ if [ \( \ SEND_KAVENEGAR="NO" SEND_KAFKA="NO" SEND_FLEEP="NO" + SEND_PROWL="NO" SEND_CUSTOM="NO" fi fi @@ -859,6 +584,68 @@ if [ "${SEND_AWSSNS}" = "YES" -a -z "${aws}" ] fi fi +# ----------------------------------------------------------------------------- +# find the recipients' addresses per method + +# netdata may call us with multiple roles, and roles may have multiple but +# overlapping recipients - so, here we find the unique recipients. +for method_name in ${method_names} ; do + send_var="SEND_${method_name^^}" + if [ ${!send_var} = "NO" ] ; then + continue + fi + + declare -A arr_var=() + + for x in ${roles//,/ } ; do + # the roles 'silent' and 'disabled' mean: + # don't send a notification for this role + [ "${x}" = "silent" -o "${x}" = "disabled" ] && continue + + role_recipients="role_recipients_${method_name}[$x]" + default_recipient_var="DEFAULT_RECIPIENT_${method_name^^}" + + a="${!role_recipients}" + [ -z "${a}" ] && a="${!default_recipient_var}" + for r in ${a//,/ } ; do + [ "${r}" != "disabled" ] && filter_recipient_by_criticality ${method_name} "${r}" && arr_var[${r/|*/}]="1" + done + done + + # build the list of recipients + to_var="to_${method_name}" + declare to_${method_name}="${!arr_var[*]}" + + [ -z "${!to_var}" ] && declare ${send_var}="NO" +done + +# ----------------------------------------------------------------------------- +# handle fixup of the email recipient list. + +fix_to_email() { + to_email= + while [ ! 
-z "${1}" ] + do + [ ! -z "${to_email}" ] && to_email="${to_email}, " + to_email="${to_email}${1}" + shift 1 + done +} + +# ${to_email} without quotes here +fix_to_email ${to_email} + +# ----------------------------------------------------------------------------- +# handle output if we're running in unit test mode +if [ ${unittest} ] ; then + for method_name in ${method_names} ; do + to_var="to_${method_name}" + echo "results: ${method_name}: ${!to_var}" + done + exit 0 +fi + +# ----------------------------------------------------------------------------- # check that we have at least a method enabled if [ "${SEND_EMAIL}" != "YES" \ -a "${SEND_PUSHOVER}" != "YES" \ @@ -879,6 +666,7 @@ if [ "${SEND_EMAIL}" != "YES" \ -a "${SEND_CUSTOM}" != "YES" \ -a "${SEND_IRC}" != "YES" \ -a "${SEND_AWSSNS}" != "YES" \ + -a "${SEND_PROWL}" != "YES" \ -a "${SEND_SYSLOG}" != "YES" \ -a "${SEND_MSTEAM}" != "YES" \ ] @@ -1126,7 +914,7 @@ EOF # kafka sender send_kafka() { - local httpcode sent=0 + local httpcode sent=0 if [ "${SEND_KAFKA}" = "YES" ] then httpcode=$(docurl -X POST \ @@ -1163,38 +951,42 @@ send_pd() { then for PD_SERVICE_KEY in ${recipients} do - d="${status} ${name} = ${value_string} - ${host}, ${family}" - ${pd_send} -k ${PD_SERVICE_KEY} \ - -t ${t} \ - -d "${d}" \ - -i ${host}:${chart}:${name} \ - -f 'info'="${info}" \ - -f 'value_w_units'="${value_string}" \ - -f 'when'="${when}" \ - -f 'duration'="${duration}" \ - -f 'roles'="${roles}" \ - -f 'host'="${host}" \ - -f 'unique_id'="${unique_id}" \ - -f 'alarm_id'="${alarm_id}" \ - -f 'event_id'="${event_id}" \ - -f 'name'="${name}" \ - -f 'chart'="${chart}" \ - -f 'family'="${family}" \ - -f 'status'="${status}" \ - -f 'old_status'="${old_status}" \ - -f 'value'="${value}" \ - -f 'old_value'="${old_value}" \ - -f 'src'="${src}" \ - -f 'non_clear_duration'="${non_clear_duration}" \ - -f 'units'="${units}" - retval=$? - if [ ${retval} -eq 0 ] - then - info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}" - sent=$((sent + 1)) - else - error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}" - fi + d="${status} ${name} = ${value_string} - ${host}, ${family}" + payload="$(cat << EOF + { + "service_key": "${PD_SERVICE_KEY}", + "event_type": "${t}", + "incident_key" : "${alarm_id}", + "description": "${d}", + "details": { + "value_w_units": "${value_string}", + "when": "${when}", + "duration" : "${duration}", + "roles": "${roles}", + "alarm_id" : "${alarm_id}", + "name" : "${name}", + "chart" : "${chart}", + "family" : "${family}", + "status" : "${status}", + "old_status" : "${old_status}", + "value" : "${value}", + "old_value" : "${old_value}", + "src" : "${src}", + "non_clear_duration" : "${non_clear_duration}", + "units" : "${units}", + "info" : "${info}" + } + } +EOF + )" + httpcode=$(docurl -X POST --data "${payload}" "https://events.pagerduty.com/generic/2010-04-15/create_event.json") + if [ "${httpcode}" = "200" ] + then + info "sent pagerduty notification for: ${host} ${chart}.${name} is ${status}'" + sent=$((sent + 1)) + else + error "failed to send pagerduty notification for: ${host} ${chart}.${name} is ${status}, with HTTP error code ${httpcode}." + fi done [ ${sent} -gt 0 ] && return 0 @@ -1249,7 +1041,7 @@ send_hipchat() { if [ "${SEND_HIPCHAT}" = "YES" -a ! -z "${HIPCHAT_SERVER}" -a ! -z "${authtoken}" -a ! -z "${recipients}" -a ! 
-z "${message}" ] then # A label to be shown in addition to the sender's name - # Valid length range: 0 - 64. + # Valid length range: 0 - 64. sender="netdata" # Valid values: html, text. @@ -1276,7 +1068,7 @@ send_hipchat() { -H "Authorization: Bearer ${authtoken}" \ -d "{\"color\": \"${color}\", \"from\": \"${host}\", \"message_format\": \"${msg_format}\", \"message\": \"${message}\", \"notify\": \"${notify}\"}" \ "https://${HIPCHAT_SERVER}/v2/room/${room}/notification") - + if [ "${httpcode}" = "204" ] then info "sent HipChat notification for: ${host} ${chart}.${name} is ${status} to '${room}'" @@ -1341,7 +1133,7 @@ send_kavenegar() { --data-urlencode "receptor=${user}" \ --data-urlencode "message=${title} ${message}") - if [ "${httpcode}" = "201" ] + if [ "${httpcode}" = "200" ] then info "sent Kavenegar SMS for: ${host} ${chart}.${name} is ${status} to '${user}'" sent=$((sent + 1)) @@ -1363,7 +1155,7 @@ send_telegram() { local bottoken="${1}" chatids="${2}" message="${3}" httpcode sent=0 chatid emoji disableNotification="" if [ "${status}" = "CLEAR" ]; then disableNotification="--data-urlencode disable_notification=true"; fi - + case "${status}" in WARNING) emoji="⚠️" ;; CRITICAL) emoji="🔴" ;; @@ -1476,9 +1268,21 @@ send_slack() { for channel in ${channels} do + # Default entry in the recipient is without a hash in front (backwards-compatible). Accept specification of channel or user. + if [ "${channel::1}" != "#" ] && [ "${channel::1}" != "@" ] ; then channel="#$channel"; fi + + # If channel is equal to "#" then do not send the channel attribute at all. Slack also defines channels and users in webhooks. + if [ "${channel}" = "#" ] ; then + ch="" + chstr="without specifying a channel" + else + ch="\"channel\": \"${channel}\"," + chstr="to '${channel}'" + fi + payload="$(cat <", + "footer": "by <${goto_url}|${host}>", "ts": ${when} } ] @@ -1511,10 +1315,10 @@ EOF httpcode=$(docurl -X POST --data-urlencode "payload=${payload}" "${webhook}") if [ "${httpcode}" = "200" ] then - info "sent slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}'" + info "sent slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}" sent=$((sent + 1)) else - error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} to '${channel}', with HTTP error code ${httpcode}." + error "failed to send slack notification for: ${host} ${chart}.${name} is ${status} ${chstr}, with HTTP error code ${httpcode}." 
fi done @@ -1633,7 +1437,7 @@ send_alerta() { "source": "${src}", "moreInfo": "View Netdata" }, - "origin": "netdata/${this_host}", + "origin": "netdata/${host}", "type": "netdataAlarm", "rawData": "${BASH_ARGV[@]}" } @@ -1759,7 +1563,7 @@ send_discord() { ], "thumb_url": "${image}", "footer_icon": "${images_base_url}/images/banner-icon-144x144.png", - "footer": "${this_host}", + "footer": "${host}", "ts": ${when} } ] @@ -1812,12 +1616,56 @@ send_fleep() { return 1 } +# ----------------------------------------------------------------------------- +# Prowl sender + +send_prowl() { + local httpcode sent=0 data message keys prio=0 alarm_url event app pri + if [ "${SEND_PROWL}" = "YES" ] ; then + message="$(urlencode "${host} ${status_message}, \`${chart}\` (${family}), *${alarm}*\\n${info}")" + message="description=${message}" + keys="$(urlencode "$(echo "${1}" | tr ' ' ,)")" + keys="apikey=${keys}" + app="application=Netdata" + + case "${status}" in + CRITICAL) + prio=2 + ;; + WARNING) + prio=1 + ;; + esac + pri="priority=${prio}" + + alarm_url="$(urlencode ${goto_url})" + alarm_url="url=${alarm_url}" + event="$(urlencode "${host} ${status_message}")" + event="event=${event}" + + data="${keys}&${pri}&${alarm_url}&${app}&${event}&${message}" + + httpcode=$(docurl -X POST --data "${data}" "https://api.prowlapp.com/publicapi/add") + + if [ "${httpcode}" = "200" ] ; then + info "sent prowl data for: ${host} ${chart}.${name} is ${status}" + sent=1 + else + error "failed to send prowl data for: ${host} ${chart}.${name} is ${status} with error code ${httpcode}." + fi + + [ ${sent} -gt 0 ] && return 0 + fi + + return 1 +} + # ----------------------------------------------------------------------------- # irc sender send_irc() { local NICKNAME="${1}" REALNAME="${2}" CHANNELS="${3}" NETWORK="${4}" SERVERNAME="${5}" MESSAGE="${6}" sent=0 channel color send_alarm reply_codes error - + if [ "${SEND_IRC}" = "YES" -a ! -z "${NICKNAME}" -a ! -z "${REALNAME}" -a ! -z "${CHANNELS}" -a ! -z "${NETWORK}" -a ! -z "${SERVERNAME}" ] then case "${status}" in @@ -1842,13 +1690,13 @@ send_irc() { info "sent irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}'" sent=$((sent + 1)) else - error "failed to send irc notification for: ${host} ${chart}.${name} is ${status} to '${CHANNEL}', with error code ${code}."
fi done fi - + [ ${sent} -gt 0 ] && return 0 - + return 1 } @@ -1871,7 +1719,7 @@ send_awssns() { info "sent Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" sent=$((sent + 1)) else - error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" + error "failed to send Amazon SNS notification for: ${host} ${chart}.${name} is ${status} to '${target}'" fi done @@ -1957,11 +1805,30 @@ send_syslog() { # prepare the content of the notification # the url to send the user on click -urlencode "${host}" >/dev/null; url_host="${REPLY}" +urlencode "${args_host}" >/dev/null; url_host="${REPLY}" urlencode "${chart}" >/dev/null; url_chart="${REPLY}" urlencode "${family}" >/dev/null; url_family="${REPLY}" urlencode "${name}" >/dev/null; url_name="${REPLY}" -goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}" + +redirect_params="host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}" +GOTOCLOUD=0 + +if [ "${NETDATA_REGISTRY_URL}" == "https://registry.my-netdata.io" ] ; then + if [ -z "${NETDATA_REGISTRY_UNIQUE_ID}" ] ; then + if [ -f "@registrydir_POST@/netdata.public.unique.id" ]; then + NETDATA_REGISTRY_UNIQUE_ID="$(cat "@registrydir_POST@/netdata.public.unique.id")" + fi + fi + if [ ! -z "${NETDATA_REGISTRY_UNIQUE_ID}" ] ; then + GOTOCLOUD=1 + fi +fi + +if [ ${GOTOCLOUD} -eq 0 ] ; then + goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?${redirect_params}" +else + goto_url="https://netdata.cloud/alarms/redirect?agentID=${NETDATA_REGISTRY_UNIQUE_ID}&${redirect_params}" +fi # the severity of the alarm severity="${status}" @@ -2127,7 +1994,7 @@ SENT_PUSHBULLET=$? # ----------------------------------------------------------------------------- # send the twilio SMS -send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +send_twilio "${TWILIO_ACCOUNT_SID}" "${TWILIO_ACCOUNT_TOKEN}" "${TWILIO_NUMBER}" "${to_twilio}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} Severity: ${severity} Chart: ${chart} Family: ${family} @@ -2138,7 +2005,7 @@ SENT_TWILIO=$? # ----------------------------------------------------------------------------- # send the messagebird SMS -send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +send_messagebird "${MESSAGEBIRD_ACCESS_KEY}" "${MESSAGEBIRD_NUMBER}" "${to_messagebird}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} Severity: ${severity} Chart: ${chart} Family: ${family} @@ -2150,7 +2017,7 @@ SENT_MESSAGEBIRD=$? # ----------------------------------------------------------------------------- # send the kavenegar SMS -send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} +send_kavenegar "${KAVENEGAR_API_KEY}" "${KAVENEGAR_SENDER}" "${to_kavenegar}" "${host} ${status_message} - ${name//_/ } - ${chart}" "${alarm} Severity: ${severity} Chart: ${chart} Family: ${family} @@ -2190,10 +2057,16 @@ SENT_PD=$? send_fleep "${to_fleep}" SENT_FLEEP=$? 
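The `goto_url` logic added above picks between the registry's `goto-host-from-alarm.html` page and the netdata.cloud redirect, depending on whether the agent's public unique id is available. A minimal sketch for checking, from a shell on the agent, which kind of URL it will produce; the id file path is an assumption (the usual expansion of `@registrydir_POST@` on stock installs):

```sh
#!/usr/bin/env sh
# Sketch: report which alarm-redirect URL this agent would generate.
# ID_FILE is an assumed path (typical expansion of @registrydir_POST@).
REGISTRY_URL="https://registry.my-netdata.io"
ID_FILE="/var/lib/netdata/registry/netdata.public.unique.id"

if [ -f "${ID_FILE}" ]; then
    # cloud redirect: the agent id lets netdata.cloud locate the right dashboard
    echo "alarm links go to: https://netdata.cloud/alarms/redirect?agentID=$(cat "${ID_FILE}")&<redirect_params>"
else
    # fallback: the classic registry redirect page
    echo "alarm links go to: ${REGISTRY_URL}/goto-host-from-alarm.html?<redirect_params>"
fi
```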
+# ----------------------------------------------------------------------------- +# send the Prowl message + +send_prowl "${to_prowl}" +SENT_PROWL=$? + # ----------------------------------------------------------------------------- # send the irc message -send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} +send_irc "${IRC_NICKNAME}" "${IRC_REALNAME}" "${to_irc}" "${IRC_NETWORK}" "${host}" "${host} ${status_message} - ${name//_/ } - ${chart} ----- ${alarm} Severity: ${severity} Chart: ${chart} Family: ${family} @@ -2278,7 +2151,12 @@ Severity: ${severity} URL : ${goto_url} Source : ${src} Date : ${date} -Notification generated on ${this_host} +Notification generated on ${host} + +Evaluated Expression : ${calc_expression} +Expression Variables : ${calc_param_values} + +The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised. --multipart-boundary Content-Type: text/html; encoding=${EMAIL_CHARSET} @@ -2340,6 +2218,24 @@ Content-Transfer-Encoding: 8bit ${raised_for_html} Time + + + ${calc_expression} + Evaluated Expression + + + + + ${calc_param_values} + Expression Variables + + + + + The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised. + + + View Netdata @@ -2351,7 +2247,7 @@ Content-Transfer-Encoding: 8bit Sent by - netdata, the real-time performance and health monitoring, on ${this_host}. + netdata, the real-time performance and health monitoring, on ${host}. @@ -2393,6 +2289,7 @@ if [ ${SENT_EMAIL} -eq 0 \ -o ${SENT_KAFKA} -eq 0 \ -o ${SENT_PD} -eq 0 \ -o ${SENT_FLEEP} -eq 0 \ + -o ${SENT_PROWL} -eq 0 \ -o ${SENT_IRC} -eq 0 \ -o ${SENT_AWSSNS} -eq 0 \ -o ${SENT_CUSTOM} -eq 0 \ diff --git a/health/notifications/alerta/README.md b/health/notifications/alerta/README.md index cf43621ff..2826fe773 100644 --- a/health/notifications/alerta/README.md +++ b/health/notifications/alerta/README.md @@ -1,4 +1,4 @@ -# alerta.io notifications +# alerta.io The [Alerta](https://alerta.io) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick @@ -73,8 +73,10 @@ We can test alarms using the standard approach: $ /opt/netdata/netdata-plugins/plugins.d/alarm-notify.sh test Note: Netdata will send 3 alarms, and because last alarm is "CLEAR" -you will not se them in main Alerta page, you need to select to see +you will not see them in main Alerta page, you need to select to see "closed" alarms in the top-right lookup. A small change in `alarm-notify.sh` that lets us test each state one by one would be useful. For more information see [https://docs.alerta.io](https://docs.alerta.io) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Falerta%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/awssns/README.md b/health/notifications/awssns/README.md index d040d2d29..5205d4cb4 100644 --- a/health/notifications/awssns/README.md +++ b/health/notifications/awssns/README.md @@ -1,4 +1,4 @@ -# Amazon SNS notifications +# Amazon SNS As part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' or SNS.
Amazon SNS works kind of similarly to Netdata's own notification system, allowing dispatch of a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to: @@ -28,4 +28,6 @@ Notes: * Netdata's native email notification support is far better in almost all respects than its support through Amazon SNS. If you want email notifications, use the native support, not SNS. * If you need to change the notification format for SNS notifications, you can do so by specifying the format in `AWSSNS_MESSAGE_FORMAT` in the configuration. This variable supports all the same variables you can use in custom notifications. - * While Amazon SNS supports sending differently formatted messages for different delivery methods, netdata does not currently support this functionality. \ No newline at end of file + * While Amazon SNS supports sending differently formatted messages for different delivery methods, netdata does not currently support this functionality. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fawssns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/discord/README.md b/health/notifications/discord/README.md index 5889ea099..7694fef4b 100644 --- a/health/notifications/discord/README.md +++ b/health/notifications/discord/README.md @@ -1,4 +1,4 @@ -# Discordapp.com notifications +# Discordapp.com This is what you will get: @@ -42,3 +42,5 @@ role_recipients_discord[webmaster]="marketing development" ``` The keywords `systems`, `databases`, `marketing`, `development` are discordapp.com channels (they should already exist within your discord server). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fdiscord%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/email/README.md b/health/notifications/email/README.md index 979790ad6..163839b6b 100644 --- a/health/notifications/email/README.md +++ b/health/notifications/email/README.md @@ -1,4 +1,4 @@ -# email notifications +# email You need a working `sendmail` command for email alerts to work. Almost all MTAs provide a `sendmail` interface. @@ -28,4 +28,6 @@ sudo su -s /bin/bash netdata /usr/libexec/netdata/plugins.d/alarm-notify.sh test [ROLE] ``` -Where `[ROLE]` is the role you want to test. The default (if you don't give a `[ROLE]`) is `sysadmin`. \ No newline at end of file +Where `[ROLE]` is the role you want to test. The default (if you don't give a `[ROLE]`) is `sysadmin`.
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Femail%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/flock/README.md b/health/notifications/flock/README.md index 33a545eac..0d679ce6b 100644 --- a/health/notifications/flock/README.md +++ b/health/notifications/flock/README.md @@ -1,4 +1,4 @@ -# flock.com notifications +# flock.com This is what you will get: @@ -28,4 +28,6 @@ FLOCK_WEBHOOK_URL="https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXX # if a role recipient is not configured, no notification will be sent DEFAULT_RECIPIENT_FLOCK="alarms" -``` \ No newline at end of file +``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fflock%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf index a997765a6..b96cf5793 100755 --- a/health/notifications/health_alarm_notify.conf +++ b/health/notifications/health_alarm_notify.conf @@ -14,6 +14,7 @@ # - sms messages to your cell phone or any sms enabled device (twilio.com) # - sms messages to your cell phone or any sms enabled device (messagebird.com) # - notifications to users on pagerduty.com +# - push notifications to iOS devices (via prowlapp.com) # - notifications to Amazon SNS topics (aws.amazon.com) # - messages to your irc channel on your selected network # - messages to a local or remote syslog daemon @@ -63,6 +64,24 @@ date_format='' +#------------------------------------------------------------------------------ +# hostname handling +# +# By default, Netdata will use the simple hostname for the system (the +# hostname with everything after the first `.` removed) when displaying +# the hostname in alert notifications. If you prefer, you can uncomment +# the line below to have Netdata instead use the host's fully qualified +# domain name. +# +# This does not report correct FQDNs for slave systems for which this +# system is a master. +# +# Additionally, if the system host name is overridden in /etc/netdata.conf +# with the `hostname` option, that name will be used unconditionally +# instead of this. +#use_fqdn='YES' + + #------------------------------------------------------------------------------ # external commands @@ -108,6 +127,14 @@ aws="" # here in most cases. #logger_options="" +#------------------------------------------------------------------------------ +# extra options + +# By default, nothing is sent if the status is CLEAR but the alarm was never WARNING or CRITICAL. +# Enable this to always send CLEAR notifications (useful if your system deduplicates alarms).
+#clear_alarm_always='YES' + +# #------------------------------------------------------------------------------ # NOTE ABOUT RECIPIENTS # @@ -268,7 +295,7 @@ SEND_MESSAGEBIRD="YES" # to get an access key, create a free account at https://www.messagebird.com # verify and activate the account (no CC info needed) # login to your account and enter your phonenumber to get some free credits -# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)' +# to get the API key, click on 'API' in the sidebar, then 'API Access (REST)' # click 'Add access key' and fill in data (you want a live key to send SMS) # Without an access key, netdata cannot send Messagebird text messages. @@ -324,7 +351,7 @@ DEFAULT_RECIPIENT_TELEGRAM="" # slack (slack.com) global notification options # multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." +# "RECIPIENT1 RECIPIENT2 ..." # enable/disable sending slack notifications SEND_SLACK="YES" @@ -335,9 +362,11 @@ SEND_SLACK="YES" # Get yours from: https://api.slack.com/incoming-webhooks SLACK_WEBHOOK_URL="" -# if a role's recipients are not configured, a notification will be send to -# this slack channel (empty = do not send a notification for unconfigured -# roles): +# if a role's recipients are not configured, a notification will be sent to: +# - A slack channel (syntax: '#channel' or 'channel') +# - A slack user (syntax: '@user') +# - The channel or user defined in slack for the webhook (syntax: '#') +# empty = do not send a notification for unconfigured roles DEFAULT_RECIPIENT_SLACK="" #------------------------------------------------------------------------------ @@ -405,7 +434,7 @@ DEFAULT_RECIPIENT_ROCKETCHAT="" SEND_ALERTA="YES" # here set your alerta server API url -# this is the API url you defined when installed Alerta server, +# this is the API URL you defined when you installed the Alerta server, # it is the same for all users. Do not include last slash. # ALERTA_WEBHOOK_URL="https:///alerta/api" ALERTA_WEBHOOK_URL="" @@ -493,9 +522,9 @@ KAFKA_SENDER_IP="" #------------------------------------------------------------------------------ # pagerduty.com notification options # -# pagerduty.com notifications require the pagerduty agent to be installed and -# a "Generic API" pagerduty service. -# https://www.pagerduty.com/docs/guides/agent-install-guide/ +# pagerduty.com notifications require a "Generic API" (Events v1) +# pagerduty service. +# https://support.pagerduty.com/docs/services-and-integrations # multiple recipients can be given like this: # " ..." @@ -534,7 +563,7 @@ FLEEP_SENDER="" #------------------------------------------------------------------------------ # irc notification options # -# irc notifications require only the nc utility to be installed. +# irc notifications require only the nc utility to be installed. # multiple recipients can be given like this: # " ..." @@ -550,7 +579,7 @@ DEFAULT_RECIPIENT_IRC="" # e.g. "irc.freenode.net" IRC_NETWORK="" -# The irc nickname which is required to send the notification. It must not be +# The irc nickname which is required to send the notification. It must not be # an already registered name as the connection's MODE is defined as a 'guest'.
IRC_NICKNAME="" @@ -625,6 +654,22 @@ SYSLOG_FACILITY='' # DEFAULT_RECIPIENT_SYSLOG="netdata" +#------------------------------------------------------------------------------ +# iOS Push Notifications + +# enable/disable sending iOS push notifications +SEND_PROWL="YES" + +# If a role's recipients are not configured, use the following, +# (empty = do not send a notiication for unconfigured roles) +# +# Recipients for iOS push notifications are Prowl API keys. +# +# A recipient may also consist of multiple Prowl API keys separated by +# commas, in which case notifications will be simultaneously sent for all +# of those API keys. +DEFAULT_RECIPIENT_PROWL="" + #------------------------------------------------------------------------------ # Amazon SNS notifications # @@ -748,6 +793,8 @@ role_recipients_irc[sysadmin]="${DEFAULT_RECIPIENT_IRC}" role_recipients_syslog[sysadmin]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[sysadming]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[sysadmin]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[sysadmin]="${DEFAULT_RECIPIENT_CUSTOM}" @@ -789,6 +836,8 @@ role_recipients_irc[domainadmin]="${DEFAULT_RECIPIENT_IRC}" role_recipients_syslog[domainadmin]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[domainadmin]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[domainadmin]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[domainadmin]="${DEFAULT_RECIPIENT_CUSTOM}" @@ -831,6 +880,8 @@ role_recipients_irc[dba]="${DEFAULT_RECIPIENT_IRC}" role_recipients_syslog[dba]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[dba]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[dba]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[dba]="${DEFAULT_RECIPIENT_CUSTOM}" @@ -873,6 +924,8 @@ role_recipients_irc[webmaster]="${DEFAULT_RECIPIENT_IRC}" role_recipients_syslog[webmaster]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[webmaster]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[webmaster]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[webmaster]="${DEFAULT_RECIPIENT_CUSTOM}" @@ -915,6 +968,8 @@ role_recipients_irc[proxyadmin]="${DEFAULT_RECIPIENT_IRC}" role_recipients_syslog[proxyadmin]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[proxyadmin]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[porxyadmin]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[proxyadmin]="${DEFAULT_RECIPIENT_CUSTOM}" @@ -955,6 +1010,8 @@ role_recipients_fleep[sitemgr]="${DEFAULT_RECIPIENT_FLEEP}" role_recipients_syslog[sitemgr]="${DEFAULT_RECIPIENT_SYSLOG}" +role_recipients_prowl[sitemgr]="${DEFAULT_RECIPIENT_PROWL}" + role_recipients_awssns[sitemgr]="${DEFAULT_RECIPIENT_AWSSNS}" role_recipients_custom[sitemgr]="${DEFAULT_RECIPIENT_CUSTOM}" diff --git a/health/notifications/irc/README.md b/health/notifications/irc/README.md index ece879fe6..9ea86e92d 100644 --- a/health/notifications/irc/README.md +++ b/health/notifications/irc/README.md @@ -1,4 +1,4 @@ -# IRC notifications +# IRC This is what you will get: @@ -70,4 +70,6 @@ role_recipients_irc[dba]="#databases-alarms" role_recipients_irc[webmaster]="#networking-alarms" ``` -The keywords `#user-alarms`, `#networking-alarms`, `#system-alarms`, `#databases-alarms` are irc channels which belong to the specified IRC network. \ No newline at end of file +The keywords `#user-alarms`, `#networking-alarms`, `#system-alarms`, `#databases-alarms` are irc channels which belong to the specified IRC network. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Firc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/kavenegar/README.md b/health/notifications/kavenegar/README.md index e59ad4d4f..d833eef82 100644 --- a/health/notifications/kavenegar/README.md +++ b/health/notifications/kavenegar/README.md @@ -1,4 +1,4 @@ -# Kavenegar notifications +# Kavenegar [Kavenegar](https://www.kavenegar.com/), a service for software developers based in Iran, provides SMS sending and receiving as well as voice calls through its APIs. @@ -36,4 +36,6 @@ SEND_KAVENEGAR="YES" KAVENEGAR_API_KEY="" KAVENEGAR_SENDER="" DEFAULT_RECIPIENT_KAVENEGAR="" -``` \ No newline at end of file +``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fkavenegar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/messagebird/README.md b/health/notifications/messagebird/README.md index e09ba5d38..cdb3e8dc1 100644 --- a/health/notifications/messagebird/README.md +++ b/health/notifications/messagebird/README.md @@ -1,5 +1,6 @@ +# Messagebird -Will look like this on your Android device: +Messagebird notifications will look like this on your Android device: ![image](https://cloud.githubusercontent.com/assets/17090999/20034652/620b6100-a39b-11e6-96af-4f83b8e830e2.png) @@ -36,3 +37,5 @@ MESSAGEBIRD_NUMBER="XXXXXXX" DEFAULT_RECIPIENT_MESSAGEBIRD="XXXXXXX" ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fmessagebird%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/pagerduty/README.md b/health/notifications/pagerduty/README.md index e35051fd4..884b97923 100644 --- a/health/notifications/pagerduty/README.md +++ b/health/notifications/pagerduty/README.md @@ -1,3 +1,4 @@ +# PagerDuty [PagerDuty](https://www.pagerduty.com/company/) is the enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.
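Since this version replaces the `pd_send` agent dependency with a plain HTTP POST (see the `send_pd()` hunk earlier in this patch), a service key can be verified by hand with `curl` alone. A minimal sketch, using the same Events v1 endpoint the script posts to; `YOUR_SERVICE_KEY` is a placeholder:

```sh
# Sketch: fire a test event at the Events v1 endpoint send_pd() uses.
# YOUR_SERVICE_KEY is a placeholder for a "Generic API" integration key.
curl -X POST --data '{
    "service_key": "YOUR_SERVICE_KEY",
    "event_type": "trigger",
    "incident_key": "netdata-test",
    "description": "test event from alarm-notify.sh"
}' "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
```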
@@ -32,3 +33,5 @@ SEND_PD="YES" # (empty = do not send a notification for unconfigured roles): DEFAULT_RECIPIENT_PD="" ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpagerduty%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/prowl/Makefile.inc b/health/notifications/prowl/Makefile.inc new file mode 100644 index 000000000..08e4c2e54 --- /dev/null +++ b/health/notifications/prowl/Makefile.inc @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +# THIS IS NOT A COMPLETE Makefile +# IT IS INCLUDED BY ITS PARENT'S Makefile.am +# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT + +# install these files +dist_noinst_DATA += \ + prowl/README.md \ + prowl/Makefile.inc \ + $(NULL) + diff --git a/health/notifications/prowl/README.md b/health/notifications/prowl/README.md new file mode 100644 index 000000000..1f060edcf --- /dev/null +++ b/health/notifications/prowl/README.md @@ -0,0 +1,22 @@ +# prowl + +[Prowl](https://www.prowlapp.com/) is a push notification service for iOS devices. Netdata +supports delivering notifications to iOS devices through Prowl. + +Because of how Netdata integrates with Prowl, there is a hard limit of +at most 1000 notifications per hour (starting from the first notification +sent). Any alerts beyond the first thousand in an hour will be dropped. + +Warning messages will be sent with the 'High' priority, critical messages +will be sent with the 'Emergency' priority, and all other messages will +be sent with the normal priority. Opening the notification's associated +URL will take you to the Netdata dashboard of the system that issued +the alert, directly to the chart that it triggered on. + +## configuration + +To use this, you will need a Prowl API key, which can be requested through +the Prowl website after registering. + +Once you have an API key, simply specify that as a recipient for Prowl +notifications. diff --git a/health/notifications/pushbullet/README.md b/health/notifications/pushbullet/README.md index 827d9301d..42b343e45 100644 --- a/health/notifications/pushbullet/README.md +++ b/health/notifications/pushbullet/README.md @@ -1,4 +1,4 @@ -$ PushBullet notifications +# PushBullet Will look like this on your browser: ![image](https://cloud.githubusercontent.com/assets/4300670/19109636/278b1c0c-8aee-11e6-8a09-7fc94fdbfec8.png) @@ -39,4 +39,6 @@ SEND_PUSHBULLET="YES" # Without an access token, netdata cannot send pushbullet notifications. PUSHBULLET_ACCESS_TOKEN="o.Sometokenhere" DEFAULT_RECIPIENT_PUSHBULLET="admin1@example.com admin3@somemail.com" -``` \ No newline at end of file +``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushbullet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/pushover/README.md b/health/notifications/pushover/README.md index 284981695..1debf5dcd 100644 --- a/health/notifications/pushover/README.md +++ b/health/notifications/pushover/README.md @@ -1,5 +1,4 @@ - -# PushOver notifications +# PushOver pushover.net allows you to receive push notifications on your mobile phone. The service seems free for up to 7.500 messages per month.
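For the Prowl integration documented just above, the `send_prowl()` function added earlier in this patch boils down to one form-encoded POST. A hand-run equivalent for testing an API key; `YOUR_PROWL_API_KEY` is a placeholder:

```sh
# Sketch: the POST send_prowl() assembles, reduced to a manual test.
# YOUR_PROWL_API_KEY is a placeholder; priority=1 corresponds to WARNING.
curl -X POST \
    --data "apikey=YOUR_PROWL_API_KEY&priority=1&application=Netdata&event=test&description=netdata%20test%20notification" \
    "https://api.prowlapp.com/publicapi/add"
```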
@@ -14,4 +13,6 @@ The configuration is like above (slack messages). pushover.net notifications look like this: -![image](https://cloud.githubusercontent.com/assets/2662304/18407319/839c10c4-7715-11e6-92c0-12f8215128d3.png) \ No newline at end of file +![image](https://cloud.githubusercontent.com/assets/2662304/18407319/839c10c4-7715-11e6-92c0-12f8215128d3.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fpushover%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/rocketchat/README.md b/health/notifications/rocketchat/README.md index 70c698672..f05e73f08 100644 --- a/health/notifications/rocketchat/README.md +++ b/health/notifications/rocketchat/README.md @@ -1,4 +1,4 @@ -# Rocket.Chat notifications +# Rocket.Chat This is what you will get: ![Netdata on RocketChat](https://i.imgur.com/Zu4t3j3.png) @@ -44,3 +44,5 @@ role_recipients_rocketchat[webmaster]="marketing development" The keywords `systems`, `databases`, `marketing`, `development` are RocketChat channels (they should already exist). Both public and private channels can be used, even if they differ from the channel configured in your RocketChat incoming webhook. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Frocketchat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/slack/README.md b/health/notifications/slack/README.md index 45be45197..6e578282e 100644 --- a/health/notifications/slack/README.md +++ b/health/notifications/slack/README.md @@ -1,4 +1,4 @@ -# Slack.com notifications +# Slack This is what you will get: ![image](https://cloud.githubusercontent.com/assets/2662304/18407116/bbd0fee6-7710-11e6-81cf-58c0defaee2b.png) @@ -17,7 +17,7 @@ Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system r # sending slack notifications # note: multiple recipients can be given like this: -# "CHANNEL1 CHANNEL2 ..." +# "RECIPIENT1 RECIPIENT2 ..." # enable/disable sending slack notifications SEND_SLACK="YES" @@ -27,14 +27,23 @@ SEND_SLACK="YES" # Without it, netdata cannot send slack notifications. SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" -# if a role recipient is not configured, a notification will be send to -# this slack channel: +# if a role's recipients are not configured, a notification will be sent to: +# - A slack channel (syntax: '#channel' or 'channel') +# - A slack user (syntax: '@user') +# - The channel or user defined in slack for the webhook (syntax: '#') +# empty = do not send a notification for unconfigured roles DEFAULT_RECIPIENT_SLACK="alarms" ``` -You can define multiple channels like this: `alarms systems`. -You can give different channels per **role** using these (at the same file): +You can define multiple recipients like this: `# #alarms systems @myuser`.
+This example will send the alarm to: +- The recipient defined in slack for the webhook (not known to netdata) +- The channel 'alarms' +- The channel 'systems' +- The user @myuser + +You can give different recipients per **role** using these (in the same file): ``` role_recipients_slack[sysadmin]="systems" @@ -42,4 +51,4 @@ role_recipients_slack[dba]="databases systems" role_recipients_slack[webmaster]="marketing development" ``` -The keywords `systems`, `databases`, `marketing`, `development` are slack.com channels (they should already exist in slack). +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fslack%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md index fcc2466a6..597db0cd2 100644 --- a/health/notifications/syslog/README.md +++ b/health/notifications/syslog/README.md @@ -1,4 +1,4 @@ -# syslog notifications +# Syslog You need a working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems. @@ -21,3 +21,5 @@ Targets are defined as follows: The `facility` and `level` are the standard syslog facility and level options, for more info on them see your local `logger` and `syslog` documentation. By default, netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else). You can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fsyslog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/telegram/README.md b/health/notifications/telegram/README.md index cd52fe194..9d652542c 100644 --- a/health/notifications/telegram/README.md +++ b/health/notifications/telegram/README.md @@ -1,4 +1,4 @@ -# Telegram.org notifications +# Telegram [Telegram](https://telegram.org/) is a messaging app with a focus on speed and security, it’s super-fast, simple and free. You can use Telegram on all your devices at the same time — your messages sync seamlessly across any number of your phones, tablets or computers. @@ -17,3 +17,5 @@ See slack for configuration.
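Under the hood, the `send_telegram()` sender wraps the Telegram Bot API's `sendMessage` method. A minimal manual test, assuming a bot token obtained from @BotFather; `<BOT_TOKEN>` and `<CHAT_ID>` are placeholders:

```sh
# Sketch: hand-run equivalent of one telegram notification.
# <BOT_TOKEN> and <CHAT_ID> are placeholders.
curl -s -X POST "https://api.telegram.org/bot<BOT_TOKEN>/sendMessage" \
    --data-urlencode "chat_id=<CHAT_ID>" \
    --data-urlencode "text=netdata test notification"
```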
Telegram messages look like this: ![image](https://fb.hash.works/ytl/preview.jpg) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftelegram%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/twilio/README.md b/health/notifications/twilio/README.md index ab3dd3c04..743f54e3c 100644 --- a/health/notifications/twilio/README.md +++ b/health/notifications/twilio/README.md @@ -1,4 +1,4 @@ -# Twilio.com notifications +# Twilio Will look like this on your Android device: @@ -38,3 +38,5 @@ TWILIO_ACCOUNT_TOKEN="xxxxxxxxxx" TWILIO_NUMBER="xxxxxxxxxxx" DEFAULT_RECIPIENT_TWILIO="+15555555555" ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Ftwilio%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/health/notifications/web/README.md b/health/notifications/web/README.md index ba7dac1fe..0aac9419b 100644 --- a/health/notifications/web/README.md +++ b/health/notifications/web/README.md @@ -1,6 +1,8 @@ -# Dashboard notifications +# Dashboard The netdata dashboard shows HTML notifications, when it is open. Such web notifications look like this: ![image](https://cloud.githubusercontent.com/assets/2662304/18407279/82bac6a6-7714-11e6-847e-c2e84eeacbfb.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fhealth%2Fnotifications%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/htmldoc/buildhtml.sh b/htmldoc/buildhtml.sh deleted file mode 100755 index 8a41f454f..000000000 --- a/htmldoc/buildhtml.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# buildhtml.sh - -# Builds the html static site, using mkdocs -# Assumes that the script is executed from the root netdata folder, by calling htmldoc/buildhtml.sh - -# Copy all netdata .md files to htmldoc/src. Exclude htmldoc itself and also the directory node_modules generated by Netlify -echo "Copying files" -rm -rf htmldoc/src -find . -type d \( -path ./htmldoc -o -path ./node_modules \) -prune -o -name "*.md" -print | cpio -pd htmldoc/src - -# Modify the first line of the main README.md, to enable proper static html generation -sed -i '0,/# netdata /s//# Introducing NetData\n\n/' htmldoc/src/README.md - -echo "Creating mkdocs.yaml" - -# Generate mkdocs.yaml -htmldoc/buildyaml.sh > htmldoc/mkdocs.yml - -echo "Fixing links" - -# Fix links (recursively, all types, executing replacements) -htmldoc/checklinks.sh -rax -if [ $? -eq 1 ] ; then exit 1 ; fi - -echo "Calling mkdocs" - -# Build html docs -mkdocs build --config-file=htmldoc/mkdocs.yml - -echo "Finished" - diff --git a/htmldoc/buildyaml.sh b/htmldoc/buildyaml.sh deleted file mode 100755 index 096e4ce5c..000000000 --- a/htmldoc/buildyaml.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash - -cd htmldoc/src - -# create yaml nav subtree with all the files directly under a specific directory -# arguments: -# tabs - how deep do we show it in the hierarchy. 
Level 1 is the top level, max should probably be 3 -# directory - to get mds from to add them to the yaml -# file - can be left empty to include all files -# name - what do we call the relevant section on the navbar. Empty if no new section is required -# maxdepth - how many levels of subdirectories do I include in the yaml in this section. 1 means just the top level and is the default if left empty -# excludefirstlevel - Optional param. If passed, mindepth is set to 2, to exclude the READMEs in the first directory level - -navpart () { - tabs=$1 - dir=$2 - file=$3 - section=$4 - maxdepth=$5 - excludefirstlevel=$6 - spc="" - - i=1 - while [ ${i} -lt ${tabs} ] ; do - spc=" $spc" - i=$[$i + 1] - done - - if [ -z "$file" ] ; then file='*' ; fi - if [[ ! -z "$section" ]] ; then echo "$spc- ${section}:" ; fi - if [ -z "$maxdepth" ] ; then maxdepth=1; fi - if [[ ! -z "$excludefirstlevel" ]] ; then mindepth=2 ; else mindepth=1; fi - - for f in $(find $dir -mindepth $mindepth -maxdepth $maxdepth -name "${file}.md" -printf '%h\0%d\0%p\n' | sort -t '\0' -n | awk -F '\0' '{print $3}'); do - # If I'm adding a section, I need the child links to be one level deeper than the requested level in "tabs" - if [ -z "$section" ] ; then - echo "$spc- '$f'" - else - echo "$spc - '$f'" - fi - done -} - - -echo -e 'site_name: NetData Documentation -repo_url: https://github.com/netdata/netdata -repo_name: GitHub -edit_uri: blob/master -site_description: NetData Documentation -copyright: NetData, 2018 -docs_dir: src -site_dir: build -#use_directory_urls: false -theme: - name: "material" - custom_dir: themes/material -markdown_extensions: - - extra - - abbr - - attr_list - - def_list - - fenced_code - - footnotes - - tables - - admonition - - codehilite - - meta - - nl2br - - sane_lists - - smarty - - toc: - permalink: True - separator: "-" - - wikilinks -nav:' - -navpart 1 . 
README "Getting Started" - -echo -ne " - 'doc/Why-Netdata.md' - - 'doc/Demo-Sites.md' - - Installation: - - 'installer/README.md' - - 'docker/README.md' - - 'installer/UPDATE.md' - - 'installer/UNINSTALL.md' -" -echo -ne "- Using NetData: -" -navpart 2 daemon -navpart 2 web "README" "Web Dashboards" -navpart 3 web/gui "" "" 3 -navpart 2 web/server "" "Web Server" -navpart 3 web/server "" "" 2 excludefirstlevel -navpart 2 web/api "" "Web API" -navpart 3 web/api "" "" 4 excludefirstlevel -navpart 2 daemon/config -#navpart 2 system -navpart 2 registry -navpart 2 streaming "" "" 4 -navpart 2 backends "" "Backends" 3 -navpart 2 database - -echo -ne " - 'doc/Performance.md' - - 'doc/netdata-for-IoT.md' - - 'doc/high-performance-netdata.md' - - 'doc/netdata-security.md' - - 'doc/Netdata-Security-and-Disclosure-Information.md' -" - -navpart 2 health README "Health Monitoring" -navpart 3 health/notifications "" "" 1 -navpart 3 health/notifications "" "Supported Notifications" 2 excludefirstlevel - -echo -ne " - Running-behind-another-web-server: - - 'doc/Running-behind-nginx.md' - - 'doc/Running-behind-apache.md' - - 'doc/Running-behind-lighttpd.md' - - 'doc/Running-behind-caddy.md' -" - - -navpart 1 collectors "" "Data Collection" 1 -echo -ne " - 'doc/Add-more-charts-to-netdata.md' - - Internal Plugins: -" -navpart 3 collectors/proc.plugin -navpart 3 collectors/statsd.plugin -navpart 3 collectors/cgroups.plugin -navpart 3 collectors/idlejitter.plugin -navpart 3 collectors/tc.plugin -navpart 3 collectors/nfacct.plugin -navpart 3 collectors/checks.plugin -navpart 3 collectors/diskspace.plugin -navpart 3 collectors/freebsd.plugin -navpart 3 collectors/macos.plugin - -navpart 2 collectors/plugins.d "" "External Plugins" -navpart 3 collectors/python.d.plugin "" "Python Plugins" 3 -navpart 3 collectors/node.d.plugin "" "Node.js Plugins" 3 -navpart 3 collectors/charts.d.plugin "" "BASH Plugins" 3 -navpart 3 collectors/apps.plugin -navpart 3 collectors/fping.plugin -navpart 3 collectors/freeipmi.plugin - -echo -ne " - Third Party Plugins: - - 'doc/Third-Party-Plugins.md' -" - -echo -ne "- Hacking netdata: - - CONTRIBUTING.md - - CODE_OF_CONDUCT.md - - CONTRIBUTORS.md -" -navpart 2 makeself "" "" 4 -navpart 2 packaging "" "" 4 -navpart 2 libnetdata "" "libnetdata" 4 -navpart 2 contrib -navpart 2 tests -navpart 2 diagrams/data_structures - -echo -ne "- About: - - 'doc/Donations-netdata-has-received.md' - - 'doc/a-github-star-is-important.md' - - CHANGELOG.md - - HISTORICAL_CHANGELOG.md - - REDISTRIBUTED.md -" - - - - diff --git a/htmldoc/themes/material/partials/footer.html b/htmldoc/themes/material/partials/footer.html deleted file mode 100644 index ba690f236..000000000 --- a/htmldoc/themes/material/partials/footer.html +++ /dev/null @@ -1,57 +0,0 @@ -{% import "partials/language.html" as lang with context %} - diff --git a/install-sh b/install-sh deleted file mode 100755 index 377bb8687..000000000 --- a/install-sh +++ /dev/null @@ -1,527 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2011-11-20.07; # UTC - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. 
-# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. -# -# -# FSF changes to this file are in the public domain. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# 'make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. - -nl=' -' -IFS=" "" $nl" - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit=${DOITPROG-} -if test -z "$doit"; then - doit_exec=exec -else - doit_exec=$doit -fi - -# Put in absolute file names if you don't have them in your path; -# or use environment vars. - -chgrpprog=${CHGRPPROG-chgrp} -chmodprog=${CHMODPROG-chmod} -chownprog=${CHOWNPROG-chown} -cmpprog=${CMPPROG-cmp} -cpprog=${CPPROG-cp} -mkdirprog=${MKDIRPROG-mkdir} -mvprog=${MVPROG-mv} -rmprog=${RMPROG-rm} -stripprog=${STRIPPROG-strip} - -posix_glob='?' -initialize_posix_glob=' - test "$posix_glob" != "?" || { - if (set -f) 2>/dev/null; then - posix_glob= - else - posix_glob=: - fi - } -' - -posix_mkdir= - -# Desired mode of installed file. -mode=0755 - -chgrpcmd= -chmodcmd=$chmodprog -chowncmd= -mvcmd=$mvprog -rmcmd="$rmprog -f" -stripcmd= - -src= -dst= -dir_arg= -dst_arg= - -copy_on_change=false -no_target_directory= - -usage="\ -Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE - or: $0 [OPTION]... SRCFILES... DIRECTORY - or: $0 [OPTION]... -t DIRECTORY SRCFILES... - or: $0 [OPTION]... -d DIRECTORIES... - -In the 1st form, copy SRCFILE to DSTFILE. -In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. -In the 4th, create DIRECTORIES. - -Options: - --help display this help and exit. - --version display version info and exit. - - -c (ignored) - -C install only if different (preserve the last data modification time) - -d create directories instead of installing files. - -g GROUP $chgrpprog installed files to GROUP. - -m MODE $chmodprog installed files to MODE. - -o USER $chownprog installed files to USER. - -s $stripprog installed files. - -t DIRECTORY install into DIRECTORY. - -T report an error if DSTFILE is a directory. 
- -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG - RMPROG STRIPPROG -" - -while test $# -ne 0; do - case $1 in - -c) ;; - - -C) copy_on_change=true;; - - -d) dir_arg=true;; - - -g) chgrpcmd="$chgrpprog $2" - shift;; - - --help) echo "$usage"; exit $?;; - - -m) mode=$2 - case $mode in - *' '* | *' '* | *' -'* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; - - -o) chowncmd="$chownprog $2" - shift;; - - -s) stripcmd=$stripprog;; - - -t) dst_arg=$2 - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - shift;; - - -T) no_target_directory=true;; - - --version) echo "$0 $scriptversion"; exit $?;; - - --) shift - break;; - - -*) echo "$0: invalid option: $1" >&2 - exit 1;; - - *) break;; - esac - shift -done - -if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then - # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dst_arg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dst_arg" - shift # fnord - fi - shift # arg - dst_arg=$arg - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - done -fi - -if test $# -eq 0; then - if test -z "$dir_arg"; then - echo "$0: no input file specified." >&2 - exit 1 - fi - # It's OK to call 'install-sh -d' without argument. - # This can happen when creating conditional directories. - exit 0 -fi - -if test -z "$dir_arg"; then - do_exit='(exit $ret); exit $ret' - trap "ret=129; $do_exit" 1 - trap "ret=130; $do_exit" 2 - trap "ret=141; $do_exit" 13 - trap "ret=143; $do_exit" 15 - - # Set umask so as not to create temps with too-generous modes. - # However, 'strip' requires both read and write access to temps. - case $mode in - # Optimize common cases. - *644) cp_umask=133;; - *755) cp_umask=22;; - - *[0-7]) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw='% 200' - fi - cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; - *) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw=,u+rw - fi - cp_umask=$mode$u_plus_rw;; - esac -fi - -for src -do - # Protect names problematic for 'test' and other utilities. - case $src in - -* | [=\(\)!]) src=./$src;; - esac - - if test -n "$dir_arg"; then - dst=$src - dstdir=$dst - test -d "$dstdir" - dstdir_status=$? - else - - # Waiting for this to be detected by the "$cpprog $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst_arg"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - dst=$dst_arg - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - if test -n "$no_target_directory"; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 - fi - dstdir=$dst - dst=$dstdir/`basename "$src"` - dstdir_status=0 - else - # Prefer dirname, but fall back on a substitute if dirname fails. 
- dstdir=` - (dirname "$dst") 2>/dev/null || - expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$dst" : 'X\(//\)[^/]' \| \ - X"$dst" : 'X\(//\)$' \| \ - X"$dst" : 'X\(/\)' \| . 2>/dev/null || - echo X"$dst" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q' - ` - - test -d "$dstdir" - dstdir_status=$? - fi - fi - - obsolete_mkdir_used=false - - if test $dstdir_status != 0; then - case $posix_mkdir in - '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. - ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. - ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. - rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; - esac - - if - $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" - ) - then : - else - - # The umask is ridiculous, or mkdir does not conform to POSIX, - # or it failed possibly due to a race condition. Create the - # directory the slow way, step by step, checking for races as we go. - - case $dstdir in - /*) prefix='/';; - [-=\(\)!]*) prefix='./';; - *) prefix='';; - esac - - eval "$initialize_posix_glob" - - oIFS=$IFS - IFS=/ - $posix_glob set -f - set fnord $dstdir - shift - $posix_glob set +f - IFS=$oIFS - - prefixes= - - for d - do - test X"$d" = X && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ - done - - if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. 
- (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true - fi - fi - fi - - if test -n "$dir_arg"; then - { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && - { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || - test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 - else - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - - # Copy the file name to the temp name. - (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && - - # and set any options; do chmod last to preserve setuid bits. - # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $cpprog $src $dsttmp" command. - # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && - { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && - { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # If -C, don't bother to copy if it wouldn't change the file. - if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - - eval "$initialize_posix_glob" && - $posix_glob set -f && - set X $old && old=:$2:$4:$5:$6 && - set X $new && new=:$2:$4:$5:$6 && - $posix_glob set +f && - - test "$old" = "$new" && - $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 - then - rm -f "$dsttmp" - else - # Rename the file to the real destination. - $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || - - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. 
- $doit $mvcmd "$dsttmp" "$dst" - } - fi || exit 1 - - trap '' 0 - fi -done - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/installer/.keep b/installer/.keep deleted file mode 100644 index e69de29bb..000000000 diff --git a/installer/README.md b/installer/README.md deleted file mode 100644 index cbcefab08..000000000 --- a/installer/README.md +++ /dev/null @@ -1,366 +0,0 @@ -# Installation -![image10](https://cloud.githubusercontent.com/assets/2662304/14253729/534c6f9c-fa95-11e5-8243-93eb0df719aa.gif) - -## Linux package managers - -You can install the latest release of netdata, using your package manager in - - - Arch Linux (`sudo pacman -S netdata`) - - Alpine Linux (`sudo apk add netdata`) - - Debian Linux (`sudo apt install netdata`) - - Gentoo Linux (`sudo emerge --ask netdata`) - - OpenSUSE (`sudo zypper install netdata`) - - Solus Linux (`sudo eopkg install netdata`) - - Ubuntu Linux >= 18.04 (`sudo apt install netdata`) - -Please note that the particular packages are not build by netdata. - -## Docker - -You can [Install netdata with Docker](../docker/#install-netdata-with-docker) - -## Linux one liner - -![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) - -To install netdata from source to your systems and keep it up to date automatically, run the following: - -:hash:**`bash <(curl -Ss https://my-netdata.io/kickstart.sh)`** - -(do not `sudo` this command, it will do it by itself as needed) - -The command: - -1. detects the distro and **installs the required system packages** for building netdata (will ask for confirmation) -2. downloads the latest netdata source tree to `/usr/src/netdata.git`. -3. installs netdata by running `./netdata-installer.sh` from the source tree. -4. installs `netdata-updater.sh` to `cron.daily`, so your netdata installation will be updated daily (you will get a message from cron only if the update fails). - -The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to change the installation directory, enable/disable plugins, etc (check below). - -For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting netdata. Example: - -```sh -bash <(curl -Ss https://my-netdata.io/kickstart.sh) all --dont-wait --dont-start-it -``` - -## Linux 64bit pre-built static binary - -You can install a pre-compiled static binary of netdata for any Intel/AMD 64bit Linux system (even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). You can also use these packages on systems with broken or unsupported package managers. - -
-![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0)
-
-To install netdata with a binary package on any Linux distro, any kernel version - for **Intel/AMD 64bit** hosts, run the following:
-
-:hash:  **`bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh)`**
-
-(do not `sudo` this command, it will do it by itself as needed; the target system does not need `bash` installed, check below for instructions to run it without `bash`)
-
-*Note: The static builds install netdata at `/opt/netdata`.*
-
-For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting netdata. Example:
-
-```sh
-bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it
-```
-
-If your shell fails to handle the above one-liner, do this:
-
-```sh
-# download the script with curl
-curl https://my-netdata.io/kickstart-static64.sh >/tmp/kickstart-static64.sh
-
-# or, download the script with wget
-wget -O /tmp/kickstart-static64.sh https://my-netdata.io/kickstart-static64.sh
-
-# run the downloaded script (any sh is fine, no need for bash)
-sh /tmp/kickstart-static64.sh
-```
-
-The static binary files are kept in the [binary-packages](https://github.com/netdata/binary-packages) repository. You can download any of the `.run` files and run them. These files are self-extracting shell scripts built with [makeself](https://github.com/megastep/makeself). The target system does **not** need to have bash installed. The same files can be used for updates too.
-
-## Other installation methods
-
-- **Linux manual installation from source**
-   - Semi-automatic, with more details about the steps involved and actions taken [here](#install-netdata-on-linux-manually)
-
-- **Non-Linux installation**
-   - [Install from package or source, on FreeBSD](#freebsd)
-   - [Install from package, on pfSense](#pfsense)
-   - [Enable netdata on FreeNAS Corral](#freenas)
-   - [Install from package or source, on macOS (OS X)](#macos)
-
-   - See also the list of netdata [package maintainers](../packaging/maintainers) for ASUSTOR NAS, OpenWRT, ReadyNAS, etc.
-
-## Install netdata on Linux manually
-
-To install the latest git version of netdata, please follow these 2 steps:
-
-1. [Prepare your system](#prepare-your-system)
-
-   Install the required packages on your system.
-
-2. [Install netdata](#install-netdata)
-
-   Download and install netdata. You can also update it the same way.
-
----
-
-### Prepare your system
-
-Try our experimental automatic requirements installer (no need to be root). This will try to find the packages that should be installed on your system to build and run netdata.
-It supports most major Linux distributions released after 2010:
-
-- **Alpine** Linux and its derivatives (you have to install `bash` yourself, before using the installer)
-- **Arch** Linux and its derivatives
-- **Gentoo** Linux and its derivatives
-- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
-- **Fedora** and its derivatives (including **Red Hat Enterprise Linux**, **CentOS**, **Amazon Machine Image**)
-- **SUSE** Linux and its derivatives (including **openSUSE**)
-- **SLE12**: your system must be registered with SUSE Customer Center, or you must have the DVD. See [#1162](https://github.com/netdata/netdata/issues/1162)
-
-Install the packages needed for a **basic netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `postgres`, `named`, hardware sensors and `SNMP`):
-
-```sh
-curl -Ss 'https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh' >/tmp/kickstart.sh && bash /tmp/kickstart.sh -i netdata
-```
-
-Install all the required packages for **monitoring everything netdata can monitor**:
-
-```sh
-curl -Ss 'https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh' >/tmp/kickstart.sh && bash /tmp/kickstart.sh -i netdata-all
-```
-
-If the above do not work for you, please [open a github issue](https://github.com/netdata/netdata/issues/new?title=packages%20installer%20failed&labels=installation%20help&body=The%20experimental%20packages%20installer%20failed.%0A%0AThis%20is%20what%20it%20says:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20screen%20here%0A%0A%60%60%60) with a copy of the message you get on screen. We are trying to make it work everywhere (this is also why the script [reports back](https://github.com/netdata/netdata/issues/2054) success or failure for all its runs).
-
----
-
-This is how to do it by hand:
-
-```sh
-# Debian / Ubuntu
-apt-get install zlib1g-dev uuid-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl
-
-# Fedora
-dnf install zlib-devel libuuid-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
-
-# CentOS / Red Hat Enterprise Linux
-yum install autoconf automake curl gcc git libmnl-devel libuuid-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel
-
-```
-
-Please note that for RHEL/CentOS you might need [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/).
-
-Once netdata is compiled, the following packages are required to run it (they are already installed by the commands above):
-
-package|description
-:-----:|-----------
-`libuuid`|part of `util-linux` for GUIDs management
-`zlib`|gzip compression for the internal netdata web server
-
-*netdata will fail to start without the above.*
-
-netdata plugins and various netdata features can be enabled, or work better, when the following optional packages are installed:
-
-package|description
-:-----:|-----------
-`bash`|for shell plugins and **alarm notifications**
-`curl`|for shell plugins and **alarm notifications**
-`iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete
-`python`|for most of the external plugins
-`python-yaml`|used for monitoring **beanstalkd**
-`python-beanstalkc`|used for monitoring **beanstalkd**
-`python-dnspython`|used for monitoring DNS query time
-`python-ipaddress`|used for monitoring **DHCPd**<br/>this package is required only if the system has python v2. python v3 has this functionality embedded
-`python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred
-`python-psycopg2`|used for monitoring **postgresql** databases
-`python-pymongo`|used for monitoring **mongodb** databases
-`nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices
-`lm-sensors`|for monitoring **hardware sensors**
-`libmnl`|for collecting netfilter metrics
-`netcat`|for shell plugins to collect metrics from remote systems
-
-*netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
-
----
-
-### Install netdata
-
-Do this to install and run netdata:
-
-```sh
-
-# download it - the directory 'netdata' will be created
-git clone https://github.com/netdata/netdata.git --depth=1
-cd netdata
-
-# run script with root privileges to build, install, start netdata
-./netdata-installer.sh
-
-```
-
-* If you don't want to run it straight away, add the `--dont-start-it` option.
-
-* If you don't want to install it in the default directories, you can run the installer like this: `./netdata-installer.sh --install /opt`. This one will install netdata in `/opt/netdata`.
-
-Once the installer completes, the file `/etc/netdata/netdata.conf` will be created (if you changed the installation directory, the configuration will appear in that directory too).
-
-You can edit this file to set options. One common option to tweak is `history`, which controls the size of the memory database netdata will use. By default it is `3600` seconds (an hour of data on the charts), which makes netdata use about 10-15MB of RAM (depending on the number of charts detected on your system). Check **[[Memory Requirements]]**.
-
-To apply the changes you made, you have to restart netdata.
-
----
-
-## Other Systems
-
-
-
-##### FreeBSD
-
-You can install netdata from the ports or packages collection.
-
-This is how to install the latest netdata version from sources on FreeBSD:
-
-```sh
-# install required packages
-pkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof
-
-# download netdata
-git clone https://github.com/netdata/netdata.git --depth=1
-
-# install netdata in /opt/netdata
-cd netdata
-./netdata-installer.sh --install /opt
-```
-
-##### pfSense
-To install netdata on pfSense, run the following commands (within a shell or under Diagnostics/Command Prompt within the pfSense web interface).
-
-Change the platform (i386/amd64, etc.) and FreeBSD version (10/11, etc.) according to your environment, and change the netdata version (1.11.0 in the example) to the latest version present in the FreeBSD repository:
-
-Note that the first three packages are downloaded from the pfSense repository to maintain compatibility with pfSense; netdata is downloaded from the FreeBSD repository.
-```
-pkg install pkgconf
-pkg install bash
-pkg install e2fsprogs-libuuid
-pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.11.0.txz
-```
-To start netdata manually, run `service netdata onestart`.
-
-To start netdata automatically at each boot, add `service netdata start` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**).
-Shellcmd Type should be set to `Shellcmd`.
-![](https://user-images.githubusercontent.com/36808164/36930790-4db3aa84-1f0d-11e8-8752-cdc08bb7207c.png)
-More information on achieving the same via the command line and scripts can be found at https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages.
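-
-For repeatable deployments, the commands above can be collected into a small script. This is only a sketch of the documented steps (adjust the FreeBSD ABI string and the netdata version to your environment):
-
-```sh
-#!/bin/sh
-# dependencies from the pfSense repository (kept for pfSense compatibility)
-pkg install -y pkgconf bash e2fsprogs-libuuid
-# netdata itself comes from the FreeBSD package repository
-pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.11.0.txz
-# start it once now; add a Shellcmd entry for boot-time starts
-service netdata onestart
-```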
-
-If you experience an issue due to the absence of `/usr/bin/install` on pfSense 2.3 or earlier, update pfSense or use the workaround from [https://redmine.pfsense.org/issues/6643](https://redmine.pfsense.org/issues/6643)
-
-##### FreeNAS
-On FreeNAS-Corral-RELEASE (>=10.0.3), netdata is pre-installed.
-
-To use netdata, the service will need to be enabled and started from the FreeNAS **[CLI](https://github.com/freenas/cli)**.
-
-To enable the netdata service:
-```
-service netdata config set enable=true
-```
-
-To start the netdata service:
-```
-service netdata start
-```
-
-##### macOS
-
-netdata on macOS still has limited charts, but external plugins do work.
-
-You can either install netdata with [Homebrew](https://brew.sh/)
-
-```sh
-brew install netdata
-```
-
-or from source:
-
-```sh
-# install Xcode Command Line Tools
-xcode-select --install
-```
-click `Install` in the software update popup window, then
-```sh
-# install HomeBrew package manager
-/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-
-# install required packages
-brew install ossp-uuid autoconf automake pkg-config
-
-# download netdata
-git clone https://github.com/netdata/netdata.git --depth=1
-
-# install netdata in /usr/local/netdata
-cd netdata
-sudo ./netdata-installer.sh --install /usr/local
-```
-
-The installer will also install a startup plist to start netdata when your Mac boots.
-
-##### Alpine 3.x
-
-Execute these commands to install netdata in Alpine Linux 3.x:
-
-```
-# install required packages
-apk add alpine-sdk bash curl zlib-dev util-linux-dev libmnl-dev gcc make git autoconf automake pkgconfig python logrotate
-
-# if you plan to run node.js netdata plugins
-apk add nodejs
-
-# download netdata - the directory 'netdata' will be created
-git clone https://github.com/netdata/netdata.git --depth=1
-cd netdata
-
-
-# build it, install it, start it
-./netdata-installer.sh
-
-
-# make netdata start at boot
-echo -e "#!/usr/bin/env bash\n/usr/sbin/netdata" >/etc/local.d/netdata.start
-chmod 755 /etc/local.d/netdata.start
-
-# make netdata stop at shutdown
-echo -e "#!/usr/bin/env bash\nkillall netdata" >/etc/local.d/netdata.stop
-chmod 755 /etc/local.d/netdata.stop
-
-# enable the local service to start automatically
-rc-update add local
-```
-
-##### Synology
-
-The documentation previously recommended installing the Debian Chroot package from the Synology community package sources and then running netdata from within the chroot. This does not work, as the chroot environment does not have access to `/proc`, and therefore exposes very few metrics to netdata. Additionally, [this issue](https://github.com/SynoCommunity/spksrc/issues/2758), still open as of 2018/06/24, indicates that the Debian Chroot package is not suitable for DSM versions greater than version 5 and may corrupt system libraries and render the NAS unable to boot.
-
-The good news is that the 64-bit static installer works fine if your NAS uses the amd64 architecture. It will install the content into `/opt/netdata`, making future removal safe and simple.
-
-###### Additional Work
-
-When netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other installations run it as the _netdata_ user, you might wish to do the same. This requires some extra work:
-
-1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
-2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign the user to the `netdata` group. netdata will switch to this user when running.
-3. Change ownership of the following directories, as defined in [Netdata Security](../doc/netdata-security.md#netdata-security):
-
-```
-$ chown -R root:netdata /opt/netdata/usr/share/netdata
-$ chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
-$ chown -R netdata:root /opt/netdata/var/log/netdata
-```
-
-Additionally, as of 2018/06/24, the netdata installer doesn't recognize DSM as an operating system, so no init script is installed. You'll have to do this manually:
-
-1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it executable with `chmod 0755 /etc/rc.netdata`.
-2. Edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
-
-```
-# Netdata startup
-[ -x /etc/rc.netdata ] && /etc/rc.netdata start
-```
diff --git a/installer/UNINSTALL.md b/installer/UNINSTALL.md
deleted file mode 100644
index 4f9a84d03..000000000
--- a/installer/UNINSTALL.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Uninstalling netdata
-
-## netdata was installed from source (or `kickstart.sh`)
-
-The script `netdata-installer.sh` generates another script called `netdata-uninstaller.sh`.
-
-To uninstall netdata, run:
-
-```
-cd /path/to/netdata.git
-./netdata-uninstaller.sh --force
-```
-
-The uninstaller will ask you to confirm all deletions.
-
-## netdata was installed with the `kickstart-static64.sh` package
-
-Stop netdata with one of the following:
-
-- `service netdata stop` (non-systemd systems)
-- `systemctl stop netdata` (systemd systems)
-
-Disable running netdata at startup, with one of the following (based on your distro):
-
-- `rc-update del netdata`
-- `update-rc.d netdata disable`
-- `chkconfig netdata off`
-- `systemctl disable netdata`
-
-Delete the netdata files:
-
-1. `rm -rf /opt/netdata`
-2. `groupdel netdata`
-3. `userdel netdata`
-4. `rm /etc/logrotate.d/netdata`
-5. `rm /etc/systemd/system/netdata.service` or `rm /etc/init.d/netdata`, depending on the distro.
diff --git a/installer/UPDATE.md b/installer/UPDATE.md
deleted file mode 100644
index cda21fc0f..000000000
--- a/installer/UPDATE.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Updating netdata after its installation
-
-![image8](https://cloud.githubusercontent.com/assets/2662304/14253735/536f4580-fa95-11e5-9f7b-99112b31a5d7.gif)
-
-
-We suggest keeping your netdata installation up to date. We are actively developing it and you should always update to the latest version.
-
-The update procedure depends on how you installed it:
-
-## You downloaded it from github using git
-
-### Manual update
-
-The installer `netdata-installer.sh` generates a `netdata-updater.sh` script in the directory where you downloaded netdata.
-You can use this script to update your netdata installation with the same options you used to install it in the first place.
-Just run it and it will download and install the latest version of netdata. The same script can be put in a cronjob to update your netdata at regular intervals.
-
-```sh
-# go to the git downloaded directory
-cd /path/to/git/downloaded/netdata
-
-# run the updater
-./netdata-updater.sh
-```
-
-_Netdata will be restarted with the new version._
-
-If you don't have this script (e.g. you deleted the directory where you downloaded netdata), just follow the **[[Installation]]** instructions again. The installer preserves your configuration.
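-
-If you are not sure which version you are currently running, the netdata binary can report it; a quick check, assuming `netdata` is in your `PATH`:
-
-```sh
-# print the installed netdata version and exit
-netdata -V
-```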
-You can also update netdata to the latest version by hand, using this:
-
-```sh
-# go to the git downloaded directory
-cd /path/to/git/downloaded/netdata
-
-# download the latest version
-git pull
-
-# rebuild it, install it, run it
-./netdata-installer.sh
-```
-
-_Netdata will be restarted with the new version._
-
-Keep in mind that netdata may now have new features, or certain old features may behave differently, so pay some attention to it after updating.
-
-### Auto-update
-
-_Please, consider the risks of running an auto-update. Something can always go wrong. Keep an eye on your installation, and run a manual update if something ever fails._
-
-You can call `netdata-updater.sh` from a cron-job. A successful update will not trigger an email from cron.
-
-```sh
-# Edit your cron-jobs
-crontab -e
-
-# add a cron-job at the bottom. This one will update netdata every day at 6:00AM:
-# update netdata
-0 6 * * * /path/to/git/downloaded/netdata/netdata-updater.sh
-```
-
-## You downloaded a binary package
-
-If you installed it from a binary package, the best way is to **obtain a newer copy** from the source you got it from in the first place.
-
-If a newer version of netdata is not available from that source, we suggest uninstalling the version you have and following the **[[Installation]]** instructions for installing a fresh version of netdata.
-
-
-
-
-
-
-
diff --git a/installer/functions.sh b/installer/functions.sh
deleted file mode 100644
index 155edd79a..000000000
--- a/installer/functions.sh
+++ /dev/null
@@ -1,866 +0,0 @@
-# no shebang necessary - this is a library to be sourced
-# SPDX-License-Identifier: GPL-3.0-or-later
-# shellcheck disable=SC1091,SC1117,SC2002,SC2004,SC2034,SC2046,SC2059,SC2086,SC2129,SC2148,SC2154,SC2155,SC2162,SC2166,SC2181,SC2193
-
-# make sure we have a UID
-[ -z "${UID}" ] && UID="$(id -u)"
-
-
-# -----------------------------------------------------------------------------
-# checking the availability of commands
-
-which_cmd() {
-    # shellcheck disable=SC2230
-    which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null
-}
-
-check_cmd() {
-    which_cmd "${1}" >/dev/null 2>&1 && return 0
-    return 1
-}
-
-
-# -----------------------------------------------------------------------------
-
-setup_terminal() {
-    TPUT_RESET=""
-    TPUT_BLACK=""
-    TPUT_RED=""
-    TPUT_GREEN=""
-    TPUT_YELLOW=""
-    TPUT_BLUE=""
-    TPUT_PURPLE=""
-    TPUT_CYAN=""
-    TPUT_WHITE=""
-    TPUT_BGBLACK=""
-    TPUT_BGRED=""
-    TPUT_BGGREEN=""
-    TPUT_BGYELLOW=""
-    TPUT_BGBLUE=""
-    TPUT_BGPURPLE=""
-    TPUT_BGCYAN=""
-    TPUT_BGWHITE=""
-    TPUT_BOLD=""
-    TPUT_DIM=""
-    TPUT_UNDERLINED=""
-    TPUT_BLINK=""
-    TPUT_INVERTED=""
-    TPUT_STANDOUT=""
-    TPUT_BELL=""
-    TPUT_CLEAR=""
-
-    # Is stderr on the terminal?
If not, then fail - test -t 2 || return 1 - - if check_cmd tput - then - if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ] - then - # Enable colors - TPUT_RESET="$(tput sgr 0)" - TPUT_BLACK="$(tput setaf 0)" - TPUT_RED="$(tput setaf 1)" - TPUT_GREEN="$(tput setaf 2)" - TPUT_YELLOW="$(tput setaf 3)" - TPUT_BLUE="$(tput setaf 4)" - TPUT_PURPLE="$(tput setaf 5)" - TPUT_CYAN="$(tput setaf 6)" - TPUT_WHITE="$(tput setaf 7)" - TPUT_BGBLACK="$(tput setab 0)" - TPUT_BGRED="$(tput setab 1)" - TPUT_BGGREEN="$(tput setab 2)" - TPUT_BGYELLOW="$(tput setab 3)" - TPUT_BGBLUE="$(tput setab 4)" - TPUT_BGPURPLE="$(tput setab 5)" - TPUT_BGCYAN="$(tput setab 6)" - TPUT_BGWHITE="$(tput setab 7)" - TPUT_BOLD="$(tput bold)" - TPUT_DIM="$(tput dim)" - TPUT_UNDERLINED="$(tput smul)" - TPUT_BLINK="$(tput blink)" - TPUT_INVERTED="$(tput rev)" - TPUT_STANDOUT="$(tput smso)" - TPUT_BELL="$(tput bel)" - TPUT_CLEAR="$(tput clear)" - fi - fi - - return 0 -} -setup_terminal || echo >/dev/null - -progress() { - echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " -} - -# ----------------------------------------------------------------------------- - -netdata_banner() { - local l1=" ^" \ - l2=" |.-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-" \ - l3=" | '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' " \ - l4=" +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->" \ - sp=" " \ - netdata="netdata" start end msg="${*}" chartcolor="${TPUT_DIM}" - - [ ${#msg} -lt ${#netdata} ] && msg="${msg}${sp:0:$(( ${#netdata} - ${#msg}))}" - [ ${#msg} -gt $(( ${#l2} - 20 )) ] && msg="${msg:0:$(( ${#l2} - 23 ))}..." - - start="$(( ${#l2} / 2 - 4 ))" - [ $(( start + ${#msg} + 4 )) -gt ${#l2} ] && start=$((${#l2} - ${#msg} - 4)) - end=$(( ${start} + ${#msg} + 4 )) - - echo >&2 - echo >&2 "${chartcolor}${l1}${TPUT_RESET}" - echo >&2 "${chartcolor}${l2:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_GREEN}${netdata}${TPUT_RESET}${chartcolor}${sp:0:$((end - start - 2 - ${#netdata}))}${l2:end:$((${#l2} - end))}${TPUT_RESET}" - echo >&2 "${chartcolor}${l3:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_CYAN}${msg}${TPUT_RESET}${chartcolor}${sp:0:2}${l3:end:$((${#l2} - end))}${TPUT_RESET}" - echo >&2 "${chartcolor}${l4}${TPUT_RESET}" - echo >&2 -} - -# ----------------------------------------------------------------------------- -# portable service command - -service_cmd="$(which_cmd service)" -rcservice_cmd="$(which_cmd rc-service)" -systemctl_cmd="$(which_cmd systemctl)" -service() { - local cmd="${1}" action="${2}" - - if [ ! -z "${systemctl_cmd}" ] - then - run "${systemctl_cmd}" "${action}" "${cmd}" - return $? - elif [ ! -z "${service_cmd}" ] - then - run "${service_cmd}" "${cmd}" "${action}" - return $? - elif [ ! -z "${rcservice_cmd}" ] - then - run "${rcservice_cmd}" "${cmd}" "${action}" - return $? - fi - return 1 -} - -# ----------------------------------------------------------------------------- -# portable pidof - -pidof_cmd="$(which_cmd pidof)" -pidof() { - if [ ! -z "${pidof_cmd}" ] - then - ${pidof_cmd} "${@}" - return $? - else - ps -acxo pid,comm |\ - sed "s/^ *//g" |\ - grep netdata |\ - cut -d ' ' -f 1 - return $? - fi -} - -# ----------------------------------------------------------------------------- -# portable delete recursively interactively - -portable_deletedir_recursively_interactively() { - if [ ! -z "$1" -a -d "$1" ] - then - if [ "$(uname -s)" = "Darwin" ] - then - echo >&2 - read >&2 -p "Press ENTER to recursively delete directory '$1' > " - echo >&2 "Deleting directory '$1' ..." 
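-            # BSD/macOS 'rm' has no GNU-style -I (prompt once) option, so the manual confirmation above stands in for it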
-            run rm -R "$1"
-        else
-            echo >&2
-            echo >&2 "Deleting directory '$1' ..."
-            run rm -I -R "$1"
-        fi
-    else
-        echo "Directory '$1' does not exist."
-    fi
-}
-
-
-# -----------------------------------------------------------------------------
-
-export SYSTEM_CPUS=1
-portable_find_processors() {
-    if [ -f "/proc/cpuinfo" ]
-        then
-        # linux
-        SYSTEM_CPUS=$(grep -c ^processor /proc/cpuinfo)
-    else
-        # freebsd
-        SYSTEM_CPUS=$(sysctl hw.ncpu 2>/dev/null | grep ^hw.ncpu | cut -d ' ' -f 2)
-    fi
-    [ -z "${SYSTEM_CPUS}" -o $(( SYSTEM_CPUS )) -lt 1 ] && SYSTEM_CPUS=1
-}
-portable_find_processors
-
-# -----------------------------------------------------------------------------
-
-run_ok() {
-    printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n"
-}
-
-run_failed() {
-    printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n"
-}
-
-ESCAPED_PRINT_METHOD=
-printf "%q " test >/dev/null 2>&1
-[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
-escaped_print() {
-    if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
-        then
-        printf "%q " "${@}"
-    else
-        printf "%s" "${*}"
-    fi
-    return 0
-}
-
-run_logfile="/dev/null"
-run() {
-    local user="${USER--}" dir="${PWD}" info info_console
-
-    if [ "${UID}" = "0" ]
-        then
-        info="[root ${dir}]# "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
-    else
-        info="[${user} ${dir}]$ "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
-    fi
-
-    printf >> "${run_logfile}" "${info}"
-    escaped_print >> "${run_logfile}" "${@}"
-    printf >> "${run_logfile}" " ... "
-
-    printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
-    escaped_print >&2 "${@}"
-    printf >&2 "${TPUT_RESET}\n"
-
-    "${@}"
-
-    local ret=$?
-    if [ ${ret} -ne 0 ]
-        then
-        run_failed
-        printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
-    else
-        run_ok
-        printf >> "${run_logfile}" "OK\n"
-    fi
-
-    return ${ret}
-}
-
-getent_cmd="$(which_cmd getent)"
-portable_check_user_exists() {
-    local username="${1}" found=
-
-    if [ ! -z "${getent_cmd}" ]
-        then
-        "${getent_cmd}" passwd "${username}" >/dev/null 2>&1
-        return $?
-    fi
-
-    found="$(cut -d ':' -f 1 </etc/passwd | grep "^${username}$")"
-    [ "${found}" = "${username}" ] && return 0
-    return 1
-}
-
-portable_check_group_exists() {
-    local groupname="${1}" found=
-
-    if [ ! -z "${getent_cmd}" ]
-        then
-        "${getent_cmd}" group "${groupname}" >/dev/null 2>&1
-        return $?
-    fi
-
-    found="$(cut -d ':' -f 1 </etc/group | grep "^${groupname}$")"
-    [ "${found}" = "${groupname}" ] && return 0
-    return 1
-}
-
-portable_check_user_in_group() {
-    local username="${1}" groupname="${2}" members
-
-    # the 4th field of /etc/group is the comma-separated member list
-    members="$(cat /etc/group | grep "^${groupname}:" | cut -d ':' -f 4)"
-    if [[ ",${members}," =~ ,${username}, ]]
-        then
-        return 0
-    fi
-    return 1
-}
-
-portable_add_user() {
-    local username="${1}" homedir="${2}"
-
-    portable_check_user_exists "${username}"
-    [ $? -eq 0 ] && echo >&2 "User '${username}' already exists." && return 0
-
-    echo >&2 "Adding ${username} user account with home ${homedir} ..."
-
-    # shellcheck disable=SC2230
-    local nologin="$(which nologin 2>/dev/null || command -v nologin 2>/dev/null || echo '/bin/false')"
-
-    # Linux
-    if check_cmd useradd
-        then
-        run useradd -r -g "${username}" -c "${username}" -s "${nologin}" --no-create-home -d "${homedir}" "${username}" && return 0
-    fi
-
-    # FreeBSD
-    if check_cmd pw
-        then
-        run pw useradd "${username}" -d "${homedir}" -g "${username}" -s "${nologin}" && return 0
-    fi
-
-    # BusyBox
-    if check_cmd adduser
-        then
-        run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0
-    fi
-
-    echo >&2 "Failed to add ${username} user account !"
-
-    return 1
-}
-
-portable_add_group() {
-    local groupname="${1}"
-
-    portable_check_group_exists "${groupname}"
-    [ $? -eq 0 ] && echo >&2 "Group '${groupname}' already exists." && return 0
-
-    echo >&2 "Adding ${groupname} user group ..."
-
-    # Linux
-    if check_cmd groupadd
-        then
-        run groupadd -r "${groupname}" && return 0
-    fi
-
-    # FreeBSD
-    if check_cmd pw
-        then
-        run pw groupadd "${groupname}" && return 0
-    fi
-
-    # BusyBox
-    if check_cmd addgroup
-        then
-        run addgroup "${groupname}" && return 0
-    fi
-
-    echo >&2 "Failed to add ${groupname} user group !"
-    return 1
-}
-
-portable_add_user_to_group() {
-    local groupname="${1}" username="${2}"
-
-    portable_check_group_exists "${groupname}"
-    [ $? -ne 0 ] && echo >&2 "Group '${groupname}' does not exist." && return 1
-
-    # check if the user is already in the group
-    if portable_check_user_in_group "${username}" "${groupname}"
-        then
-        # username is already there
-        echo >&2 "User '${username}' is already in group '${groupname}'."
-        return 0
-    else
-        # username is not in group
-        echo >&2 "Adding ${username} user to the ${groupname} group ..."
-
-        # Linux
-        if check_cmd usermod
-            then
-            run usermod -a -G "${groupname}" "${username}" && return 0
-        fi
-
-        # FreeBSD
-        if check_cmd pw
-            then
-            run pw groupmod "${groupname}" -m "${username}" && return 0
-        fi
-
-        # BusyBox
-        if check_cmd addgroup
-            then
-            run addgroup "${username}" "${groupname}" && return 0
-        fi
-
-        echo >&2 "Failed to add user ${username} to group ${groupname} !"
-        return 1
-    fi
-}
-
-iscontainer() {
-    # man systemd-detect-virt
-    local cmd=$(which_cmd systemd-detect-virt)
-    if [ ! -z "${cmd}" -a -x "${cmd}" ]
-        then
-        "${cmd}" --container >/dev/null 2>&1 && return 0
-    fi
-
-    # /proc/1/sched exposes the host's pid of our init !
-    # http://stackoverflow.com/a/37016302
-    local pid=$( cat /proc/1/sched 2>/dev/null | head -n 1 | { IFS='(),#:' read name pid th threads; echo $pid; } )
-    pid=$(( pid + 0 ))
-    [ ${pid} -ne 1 ] && return 0
-
-    # lxc sets environment variable 'container'
-    [ ! -z "${container}" ] && return 0
-
-    # docker creates /.dockerenv
-    # http://stackoverflow.com/a/25518345
-    [ -f "/.dockerenv" ] && return 0
-
-    # ubuntu and debian supply /bin/running-in-container
-    # https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/upstart/1.12.1-0ubuntu4/file/bin/running-in-container
-    if [ -x "/bin/running-in-container" ]
-        then
-        "/bin/running-in-container" >/dev/null 2>&1 && return 0
-    fi
-
-    return 1
-}
-
-issystemd() {
-    local pids p myns ns systemctl
-
-    # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exist, it is not systemd
-    [ ! -d /lib/systemd/system -a ! -d /usr/lib/systemd/system ] && return 1
-
-    # if there is no systemctl command, it is not systemd
-    # shellcheck disable=SC2230
-    systemctl=$(which systemctl 2>/dev/null || command -v systemctl 2>/dev/null)
-    [ -z "${systemctl}" -o ! -x "${systemctl}" ] && return 1
-
-    # if pid 1 is systemd, it is systemd
-    [ "$(basename $(readlink /proc/1/exe) 2>/dev/null)" = "systemd" ] && return 0
-
-    # if systemd is not running, it is not systemd
-    pids=$(pidof systemd 2>/dev/null)
-    [ -z "${pids}" ] && return 1
-
-    # check if the running systemd processes are not in our namespace
-    myns="$(readlink /proc/self/ns/pid 2>/dev/null)"
-    for p in ${pids}
-    do
-        ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)"
-
-        # if pid of systemd is in our namespace, it is systemd
-        [ ! -z "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0
-    done
-
-    # else, it is not systemd
-    return 1
-}
-
-install_non_systemd_init() {
-    [ "${UID}" != 0 ] && return 1
-
-    local key="unknown"
-    if [ -f /etc/os-release ]
-        then
-        source /etc/os-release || return 1
-        key="${ID}-${VERSION_ID}"
-
-    elif [ -f /etc/redhat-release ]
-        then
-        key=$(</etc/redhat-release)
-    fi
-
-    if [ -d /etc/init.d -a ! -f /etc/init.d/netdata ]
-        then
-        if [[ "${key}" =~ ^(gentoo|alpine).* ]]
-            then
-            echo >&2 "Installing OpenRC init file..."
-            run cp system/netdata-openrc /etc/init.d/netdata && \
-            run chmod 755 /etc/init.d/netdata && \
-            run rc-update add netdata default && \
-            return 0
-
-        elif [ "${key}" = "debian-7" \
-            -o "${key}" = "ubuntu-12.04" \
-            -o "${key}" = "ubuntu-14.04" \
-            ]
-            then
-            echo >&2 "Installing LSB init file..."
- run cp system/netdata-lsb /etc/init.d/netdata && \ - run chmod 755 /etc/init.d/netdata && \ - run update-rc.d netdata defaults && \ - run update-rc.d netdata enable && \ - return 0 - elif [[ "${key}" =~ ^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).* ]] - then - echo >&2 "Installing init.d file..." - run cp system/netdata-init-d /etc/init.d/netdata && \ - run chmod 755 /etc/init.d/netdata && \ - run chkconfig netdata on && \ - return 0 - else - echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it." - return 1 - fi - elif [ -f /etc/init.d/netdata ] - then - echo >&2 "file '/etc/init.d/netdata' already exists." - return 0 - else - echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it." - fi - - return 1 -} - -NETDATA_START_CMD="netdata" -NETDATA_STOP_CMD="killall netdata" - -install_netdata_service() { - local uname="$(uname 2>/dev/null)" - - if [ "${UID}" -eq 0 ] - then - if [ "${uname}" = "Darwin" ] - then - - if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ] - then - echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists." - return 0 - else - echo >&2 "Installing MacOS X plist file..." - run cp system/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist && \ - run launchctl load /Library/LaunchDaemons/com.github.netdata.plist && \ - return 0 - fi - - elif [ "${uname}" = "FreeBSD" ] - then - - run cp system/netdata-freebsd /etc/rc.d/netdata && \ - NETDATA_START_CMD="service netdata start" && \ - NETDATA_STOP_CMD="service netdata stop" && \ - return 0 - - elif issystemd - then - # systemd is running on this system - NETDATA_START_CMD="systemctl start netdata" - NETDATA_STOP_CMD="systemctl stop netdata" - - SYSTEMD_DIRECTORY="" - - if [ -d "/lib/systemd/system" ] - then - SYSTEMD_DIRECTORY="/lib/systemd/system" - elif [ -d "/usr/lib/systemd/system" ] - then - SYSTEMD_DIRECTORY="/usr/lib/systemd/system" - fi - - if [ "${SYSTEMD_DIRECTORY}x" != "x" ] - then - echo >&2 "Installing systemd service..." - run cp system/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" && \ - run systemctl daemon-reload && \ - run systemctl enable netdata && \ - return 0 - else - echo >&2 "no systemd directory; cannot install netdata.service" - fi - else - install_non_systemd_init - local ret=$? - - if [ ${ret} -eq 0 ] - then - if [ ! -z "${service_cmd}" ] - then - NETDATA_START_CMD="service netdata start" - NETDATA_STOP_CMD="service netdata stop" - elif [ ! -z "${rcservice_cmd}" ] - then - NETDATA_START_CMD="rc-service netdata start" - NETDATA_STOP_CMD="rc-service netdata stop" - fi - fi - - return ${ret} - fi - fi - - return 1 -} - - -# ----------------------------------------------------------------------------- -# stop netdata - -pidisnetdata() { - if [ -d /proc/self ] - then - [ -z "$1" -o ! -f "/proc/$1/stat" ] && return 1 - [ "$(cat "/proc/$1/stat" | cut -d '(' -f 2 | cut -d ')' -f 1)" = "netdata" ] && return 0 - return 1 - fi - return 0 -} - -stop_netdata_on_pid() { - local pid="${1}" ret=0 count=0 - - pidisnetdata ${pid} || return 0 - - printf >&2 "Stopping netdata on pid ${pid} ..." - while [ ! -z "$pid" -a ${ret} -eq 0 ] - do - if [ ${count} -gt 45 ] - then - echo >&2 "Cannot stop the running netdata on pid ${pid}." - return 1 - fi - - count=$(( count + 1 )) - - run kill ${pid} 2>/dev/null - ret=$? - - test ${ret} -eq 0 && printf >&2 "." 
&& sleep 2
-    done
-
-    echo >&2
-    if [ ${ret} -eq 0 ]
-        then
-        echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !"
-        return 1
-    fi
-
-    echo >&2 "netdata on pid ${pid} stopped."
-    return 0
-}
-
-netdata_pids() {
-    local p myns ns
-
-    myns="$(readlink /proc/self/ns/pid 2>/dev/null)"
-
-    # echo >&2 "Stopping a (possibly) running netdata (namespace '${myns}')..."
-
-    for p in \
-        $(cat /var/run/netdata.pid 2>/dev/null) \
-        $(cat /var/run/netdata/netdata.pid 2>/dev/null) \
-        $(pidof netdata 2>/dev/null)
-    do
-        ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)"
-
-        if [ -z "${myns}" -o -z "${ns}" -o "${myns}" = "${ns}" ]
-            then
-            pidisnetdata ${p} && echo "${p}"
-        fi
-    done
-}
-
-stop_all_netdata() {
-    local p
-    for p in $(netdata_pids)
-    do
-        stop_netdata_on_pid ${p}
-    done
-}
-
-# -----------------------------------------------------------------------------
-# restart netdata
-
-restart_netdata() {
-    local netdata="${1}"
-    shift
-
-    local started=0
-
-    progress "Start netdata"
-
-    if [ "${UID}" -eq 0 ]
-        then
-        service netdata stop
-        stop_all_netdata
-        service netdata restart && started=1
-
-        if [ ${started} -eq 1 -a -z "$(netdata_pids)" ]
-            then
-            echo >&2 "Ooops! it seems netdata is not started."
-            started=0
-        fi
-
-        if [ ${started} -eq 0 ]
-            then
-            service netdata start && started=1
-        fi
-    fi
-
-    if [ ${started} -eq 1 -a -z "$(netdata_pids)" ]
-        then
-        echo >&2 "Hm... it seems netdata is still not started."
-        started=0
-    fi
-
-    if [ ${started} -eq 0 ]
-        then
-        # still not started...
-
-        run stop_all_netdata
-        run "${netdata}" "${@}"
-        return $?
-    fi
-
-    return 0
-}
-
-# -----------------------------------------------------------------------------
-# install netdata logrotate
-
-install_netdata_logrotate() {
-    if [ ${UID} -eq 0 ]
-        then
-        if [ -d /etc/logrotate.d ]
-            then
-            if [ ! -f /etc/logrotate.d/netdata ]
-                then
-                run cp system/netdata.logrotate /etc/logrotate.d/netdata
-            fi
-
-            if [ -f /etc/logrotate.d/netdata ]
-                then
-                run chmod 644 /etc/logrotate.d/netdata
-            fi
-
-            return 0
-        fi
-    fi
-
-    return 1
-}
-
-# -----------------------------------------------------------------------------
-# download netdata.conf
-
-fix_netdata_conf() {
-    local owner="${1}"
-    # note: ${filename} is expected to be set by the caller (bash dynamic scoping)
-
-    if [ "${UID}" -eq 0 ]
-        then
-        run chown "${owner}" "${filename}"
-    fi
-    run chmod 0664 "${filename}"
-}
-
-generate_netdata_conf() {
-    local owner="${1}" filename="${2}" url="${3}"
-
-    if [ ! -s "${filename}" ]
-        then
-        cat >"${filename}" <<EOFCONF
-# netdata can generate its own config file
-# you can get it from the running netdata daemon, with this command:
-#
-# curl -s -o ${filename} ${url}
-EOFCONF
-        run_ok "New configuration saved for you to edit at ${filename}"
-
-        fix_netdata_conf "${owner}"
-    fi
-}
-
-download_netdata_conf() {
-    local owner="${1}" filename="${2}" url="${3}"
-
-    if [ ! -s "${filename}" ]
-        then
-        echo >&2
-        echo >&2 "-------------------------------------------------------------------------------"
-        echo >&2
-        echo >&2 "Downloading default configuration from netdata..."
-        sleep 5
-
-        # remove a possibly obsolete download
-        [ -f "${filename}.new" ] && rm "${filename}.new"
-
-        # disable a proxy to get data from the local netdata
-        export http_proxy=
-        export https_proxy=
-
-        # try curl
-        run curl -s -o "${filename}.new" "${url}"
-        ret=$?
-
-        if [ ${ret} -ne 0 -o ! -s "${filename}.new" ]
-            then
-            # try wget
-            run wget -O "${filename}.new" "${url}"
-            ret=$?
-        fi
-
-        if [ ${ret} -eq 0 -a -s "${filename}.new" ]
-            then
-            run mv "${filename}.new" "${filename}"
-            run_ok "New configuration saved for you to edit at ${filename}"
-        else
-            [ -f "${filename}.new" ] && rm "${filename}.new"
-            run_failed "Cannot download configuration from netdata daemon using url '${url}'"
-
-            generate_netdata_conf "${owner}" "${filename}" "${url}"
-        fi
-
-        fix_netdata_conf "${owner}"
-    fi
-}
-
-
-# -----------------------------------------------------------------------------
-# add netdata user and group
-
-NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody"
-NETDATA_ADDED_TO_GROUPS=""
-add_netdata_user_and_group() {
-    local homedir="${1}" g
-
-    if [ ${UID} -eq 0 ]
-        then
-        portable_add_group netdata || return 1
-        portable_add_user netdata "${homedir}" || return 1
-
-        for g in ${NETDATA_WANTED_GROUPS}
-        do
-            portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}"
-        done
-
-        [ ~netdata = / ] && cat <<USERMOD
-
-The netdata user has its home directory set to /
-You may want to change it, using a command like this:
-
-   usermod -d "${homedir}" netdata
-
-USERMOD
-        return 0
-    fi
-
-    return 1
-}
diff --git a/kickstart-static64.sh b/kickstart-static64.sh
deleted file mode 100755
--- a/kickstart-static64.sh
+++ /dev/null
-#!/usr/bin/env sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-[ -z "${UID}" ] && UID="$(id -u)"
-
-# ---------------------------------------------------------------------------------------------------------------------
-# library functions copied from installer/functions.sh
-
-which_cmd() {
-    # shellcheck disable=SC2230
-    which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null
-}
-
-check_cmd() {
-    which_cmd "${1}" >/dev/null 2>&1 && return 0
-    return 1
-}
-
-setup_terminal() {
-    TPUT_RESET=""
-    TPUT_BLACK=""
-    TPUT_RED=""
-    TPUT_GREEN=""
-    TPUT_YELLOW=""
-    TPUT_BLUE=""
-    TPUT_PURPLE=""
-    TPUT_CYAN=""
-    TPUT_WHITE=""
-    TPUT_BGBLACK=""
-    TPUT_BGRED=""
-    TPUT_BGGREEN=""
-    TPUT_BGYELLOW=""
-    TPUT_BGBLUE=""
-    TPUT_BGPURPLE=""
-    TPUT_BGCYAN=""
-    TPUT_BGWHITE=""
-    TPUT_BOLD=""
-    TPUT_DIM=""
-    TPUT_UNDERLINED=""
-    TPUT_BLINK=""
-    TPUT_INVERTED=""
-    TPUT_STANDOUT=""
-    TPUT_BELL=""
-    TPUT_CLEAR=""
-
-    # Is stderr on the terminal? If not, then fail
-    test -t 2 || return 1
-
-    if check_cmd tput
-        then
-        if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ]
-            then
-            # Enable colors
-            TPUT_RESET="$(tput sgr 0)"
-            TPUT_BLACK="$(tput setaf 0)"
-            TPUT_RED="$(tput setaf 1)"
-            TPUT_GREEN="$(tput setaf 2)"
-            TPUT_YELLOW="$(tput setaf 3)"
-            TPUT_BLUE="$(tput setaf 4)"
-            TPUT_PURPLE="$(tput setaf 5)"
-            TPUT_CYAN="$(tput setaf 6)"
-            TPUT_WHITE="$(tput setaf 7)"
-            TPUT_BGBLACK="$(tput setab 0)"
-            TPUT_BGRED="$(tput setab 1)"
-            TPUT_BGGREEN="$(tput setab 2)"
-            TPUT_BGYELLOW="$(tput setab 3)"
-            TPUT_BGBLUE="$(tput setab 4)"
-            TPUT_BGPURPLE="$(tput setab 5)"
-            TPUT_BGCYAN="$(tput setab 6)"
-            TPUT_BGWHITE="$(tput setab 7)"
-            TPUT_BOLD="$(tput bold)"
-            TPUT_DIM="$(tput dim)"
-            TPUT_UNDERLINED="$(tput smul)"
-            TPUT_BLINK="$(tput blink)"
-            TPUT_INVERTED="$(tput rev)"
-            TPUT_STANDOUT="$(tput smso)"
-            TPUT_BELL="$(tput bel)"
-            TPUT_CLEAR="$(tput clear)"
-        fi
-    fi
-
-    return 0
-}
-setup_terminal || echo >/dev/null
-
-progress() {
-    echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- "
-}
-
-run_ok() {
-    printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n"
-}
-
-run_failed() {
-    printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n"
-}
-
-ESCAPED_PRINT_METHOD=
-printf "%q " test >/dev/null 2>&1
-[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
-escaped_print() {
-    if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
-        then
-        printf "%q " "${@}"
-    else
-        printf "%s" "${*}"
-    fi
-    return 0
-}
-
-run_logfile="/dev/null"
-run() {
-    local user="${USER--}" dir="${PWD}" info info_console
-
-    if [ "${UID}" = "0" ]
-        then
-        info="[root ${dir}]# "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
-    else
-        info="[${user} ${dir}]$ "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
-    fi
-
-    printf >> "${run_logfile}" "${info}"
-    escaped_print >> "${run_logfile}" "${@}"
-    printf >> "${run_logfile}" " ... 
" - - printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" - escaped_print >&2 "${@}" - printf >&2 "${TPUT_RESET}\n" - - "${@}" - - local ret=$? - if [ ${ret} -ne 0 ] - then - run_failed - printf >> "${run_logfile}" "FAILED with exit code ${ret}\n" - else - run_ok - printf >> "${run_logfile}" "OK\n" - fi - - return ${ret} -} - - -# --------------------------------------------------------------------------------------------------------------------- - -fatal() { - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" - exit 1 -} - -# --------------------------------------------------------------------------------------------------------------------- - -if [ "$(uname -m)" != "x86_64" ] - then - fatal "Static binary versions of netdata are available only for 64bit Intel/AMD CPUs (x86_64), but yours is: $(uname -m)." -fi - -if [ "$(uname -s)" != "Linux" ] - then - fatal "Static binary versions of netdata are available only for Linux, but this system is $(uname -s)" -fi - -curl="$(which_cmd curl)" -wget="$(which_cmd wget)" - -# --------------------------------------------------------------------------------------------------------------------- - -progress "Checking the latest version of static build..." - -BASE='https://raw.githubusercontent.com/netdata/binary-packages/master' - -LATEST= -if [ ! -z "${curl}" -a -x "${curl}" ] -then - LATEST="$(run ${curl} "${BASE}/netdata-latest.gz.run")" -elif [ ! -z "${wget}" -a -x "${wget}" ] -then - LATEST="$(run ${wget} -O - "${BASE}/netdata-latest.gz.run")" -else - fatal "curl or wget are needed for this script to work." -fi - -if [ -z "${LATEST}" ] - then - fatal "Cannot find the latest static binary version of netdata." -fi - -# --------------------------------------------------------------------------------------------------------------------- - -progress "Downloading static netdata binary: ${LATEST}" - -ret=1 -if [ ! -z "${curl}" -a -x "${curl}" ] -then - run ${curl} "${BASE}/${LATEST}" >"/tmp/${LATEST}" - ret=$? -elif [ ! -z "${wget}" -a -x "${wget}" ] -then - run ${wget} -O "/tmp/${LATEST}" "${BASE}/${LATEST}" - ret=$? -else - fatal "curl or wget are needed for this script to work." -fi - -if [ ${ret} -ne 0 -o ! -s "/tmp/${LATEST}" ] - then - fatal "Failed to download the latest static binary version of netdata." -fi - -# --------------------------------------------------------------------------------------------------------------------- - -opts= -inner_opts= -while [ ! -z "${1}" ] -do - if [ "${1}" = "--dont-wait" -o "${1}" = "--non-interactive" -o "${1}" = "--accept" ] - then - opts="${opts} --accept" - elif [ "${1}" = "--dont-start-it" ] - then - inner_opts="${inner_opts} ${1}" - else - echo >&2 "Unknown option '${1}'" - exit 1 - fi - shift -done -[ ! -z "${inner_opts}" ] && inner_opts="-- ${inner_opts}" - -# --------------------------------------------------------------------------------------------------------------------- - -progress "Installing netdata" - -sudo= -[ "${UID}" != "0" ] && sudo="sudo" -run ${sudo} sh "/tmp/${LATEST}" ${opts} ${inner_opts} - -if [ $? 
-eq 0 ] - then - rm "/tmp/${LATEST}" -else - echo >&2 "NOTE: did not remove: /tmp/${LATEST}" -fi diff --git a/kickstart.sh b/kickstart.sh deleted file mode 100755 index b493802f2..000000000 --- a/kickstart.sh +++ /dev/null @@ -1,377 +0,0 @@ -#!/usr/bin/env sh -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Run me with: -# -# bash <(curl -Ss https://my-netdata.io/kickstart.sh) -# -# or (to install all netdata dependencies): -# -# bash <(curl -Ss https://my-netdata.io/kickstart.sh) all -# -# Other options: -# --src-dir PATH keep netdata.git at PATH/netdata.git -# --dont-wait do not prompt for user input -# --non-interactive do not prompt for user input -# --no-updates do not install script for daily updates -# -# This script will: -# -# 1. install all netdata compilation dependencies -# using the package manager of the system -# -# 2. download netdata source code in /usr/src/netdata.git -# -# 3. install netdata - -# shellcheck disable=SC1117,SC2016,SC2034,SC2039,SC2059,SC2086,SC2119,SC2120,SC2129,SC2162,SC2166,SC2181 - -umask 022 - -[ -z "${UID}" ] && UID="$(id -u)" - -# --------------------------------------------------------------------------------------------------------------------- -# library functions copied from installer/functions.sh - -which_cmd() { - # shellcheck disable=SC2230 - which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null -} - -check_cmd() { - which_cmd "${1}" >/dev/null 2>&1 && return 0 - return 1 -} - -setup_terminal() { - TPUT_RESET="" - TPUT_BLACK="" - TPUT_RED="" - TPUT_GREEN="" - TPUT_YELLOW="" - TPUT_BLUE="" - TPUT_PURPLE="" - TPUT_CYAN="" - TPUT_WHITE="" - TPUT_BGBLACK="" - TPUT_BGRED="" - TPUT_BGGREEN="" - TPUT_BGYELLOW="" - TPUT_BGBLUE="" - TPUT_BGPURPLE="" - TPUT_BGCYAN="" - TPUT_BGWHITE="" - TPUT_BOLD="" - TPUT_DIM="" - TPUT_UNDERLINED="" - TPUT_BLINK="" - TPUT_INVERTED="" - TPUT_STANDOUT="" - TPUT_BELL="" - TPUT_CLEAR="" - - # Is stderr on the terminal? If not, then fail - test -t 2 || return 1 - - if check_cmd tput - then - if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ] - then - # Enable colors - TPUT_RESET="$(tput sgr 0)" - TPUT_BLACK="$(tput setaf 0)" - TPUT_RED="$(tput setaf 1)" - TPUT_GREEN="$(tput setaf 2)" - TPUT_YELLOW="$(tput setaf 3)" - TPUT_BLUE="$(tput setaf 4)" - TPUT_PURPLE="$(tput setaf 5)" - TPUT_CYAN="$(tput setaf 6)" - TPUT_WHITE="$(tput setaf 7)" - TPUT_BGBLACK="$(tput setab 0)" - TPUT_BGRED="$(tput setab 1)" - TPUT_BGGREEN="$(tput setab 2)" - TPUT_BGYELLOW="$(tput setab 3)" - TPUT_BGBLUE="$(tput setab 4)" - TPUT_BGPURPLE="$(tput setab 5)" - TPUT_BGCYAN="$(tput setab 6)" - TPUT_BGWHITE="$(tput setab 7)" - TPUT_BOLD="$(tput bold)" - TPUT_DIM="$(tput dim)" - TPUT_UNDERLINED="$(tput smul)" - TPUT_BLINK="$(tput blink)" - TPUT_INVERTED="$(tput rev)" - TPUT_STANDOUT="$(tput smso)" - TPUT_BELL="$(tput bel)" - TPUT_CLEAR="$(tput clear)" - fi - fi - - return 0 -} -setup_terminal || echo >/dev/null - -progress() { - echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " -} - -run_ok() { - printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" -} - -run_failed() { - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" -} - -ESCAPED_PRINT_METHOD= -printf "%q " test >/dev/null 2>&1 -[ $? 
-eq 0 ] && ESCAPED_PRINT_METHOD="printfq"
-escaped_print() {
-    if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]
-        then
-        printf "%q " "${@}"
-    else
-        printf "%s" "${*}"
-    fi
-    return 0
-}
-
-run_logfile="/dev/null"
-run() {
-    local user="${USER--}" dir="${PWD}" info info_console
-
-    if [ "${UID}" = "0" ]
-        then
-        info="[root ${dir}]# "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# "
-    else
-        info="[${user} ${dir}]$ "
-        info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ "
-    fi
-
-    printf >> "${run_logfile}" "${info}"
-    escaped_print >> "${run_logfile}" "${@}"
-    printf >> "${run_logfile}" " ... "
-
-    printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}"
-    escaped_print >&2 "${@}"
-    printf >&2 "${TPUT_RESET}\n"
-
-    "${@}"
-
-    local ret=$?
-    if [ ${ret} -ne 0 ]
-        then
-        run_failed
-        printf >> "${run_logfile}" "FAILED with exit code ${ret}\n"
-    else
-        run_ok
-        printf >> "${run_logfile}" "OK\n"
-    fi
-
-    return ${ret}
-}
-
-
-# ---------------------------------------------------------------------------------------------------------------------
-# collect system information
-
-fatal() {
-    printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n"
-    exit 1
-}
-
-export PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
-
-curl="$(which_cmd curl)"
-wget="$(which_cmd wget)"
-bash="$(which_cmd bash)"
-
-if [ -z "${BASH_VERSION}" ]
-then
-    # we don't run under bash
-    if [ ! -z "${bash}" -a -x "${bash}" ]
-        then
-        BASH_MAJOR_VERSION=$(${bash} -c 'echo "${BASH_VERSINFO[0]}"')
-    fi
-else
-    # we run under bash
-    BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}"
-fi
-
-HAS_BASH4=1
-if [ -z "${BASH_MAJOR_VERSION}" ]
-then
-    echo >&2 "No BASH is available on this system"
-    HAS_BASH4=0
-elif [ $((BASH_MAJOR_VERSION)) -lt 4 ]
-then
-    echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION})"
-    HAS_BASH4=0
-fi
-
-SYSTEM="$(uname -s)"
-OS="$(uname -o)"
-MACHINE="$(uname -m)"
-
-cat <<EOF
-System            : ${SYSTEM}
-Operating System  : ${OS}
-Machine           : ${MACHINE}
-BASH major version: ${BASH_MAJOR_VERSION}
-EOF
-
-sudo=""
-[ "${UID}" != "0" ] && sudo="sudo"
-
-# ---------------------------------------------------------------------------------------------------------------------
-# parse command line arguments
-
-SOURCE_DST="/usr/src"
-PACKAGES_INSTALLER_OPTIONS="netdata"
-NETDATA_INSTALLER_OPTIONS=""
-NETDATA_UPDATES="--auto-update"
-INTERACTIVE=1
-while [ ! -z "${1}" ]
-do
-    if [ "${1}" = "all" ]
-        then
-        PACKAGES_INSTALLER_OPTIONS="netdata-all"
-        shift 1
-    elif [ "${1}" = "--dont-wait" -o "${1}" = "--non-interactive" ]
-        then
-        INTERACTIVE=0
-        shift 1
-    elif [ "${1}" = "--src-dir" ]
-        then
-        SOURCE_DST="${2}"
-        # echo >&2 "netdata source will be installed at ${SOURCE_DST}/netdata.git"
-        shift 2
-    elif [ "${1}" = "--no-updates" ]
-        then
-        # echo >&2 "netdata will not auto-update"
-        NETDATA_UPDATES=
-        shift 1
-    else
-        break
-    fi
-done
-
-if [ "${INTERACTIVE}" = "0" ]
-then
-    PACKAGES_INSTALLER_OPTIONS="--dont-wait --non-interactive ${PACKAGES_INSTALLER_OPTIONS}"
-    NETDATA_INSTALLER_OPTIONS="--dont-wait"
-fi
-
-# echo "PACKAGES_INSTALLER_OPTIONS=${PACKAGES_INSTALLER_OPTIONS}"
-# echo "NETDATA_INSTALLER_OPTIONS=${NETDATA_INSTALLER_OPTIONS} ${*}"
-
-if [ "${OS}" = "GNU/Linux" -o "${SYSTEM}" = "Linux" ]
-then
-    if [ "${HAS_BASH4}" = "1" ]
-        then
-        tmp="$(mktemp /tmp/netdata-kickstart-XXXXXX)"
-        url="https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh"
-
-        progress "Downloading script to detect required packages..."
-        if [ ! -z "${curl}" ]
-            then
-            run ${curl} "${url}" >"${tmp}" || fatal "Cannot download ${url}"
-        elif [ ! -z "${wget}" ]
-            then
-            run "${wget}" -O - "${url}" >"${tmp}" || fatal "Cannot download ${url}"
-        else
-            rm "${tmp}"
-            fatal "I need curl or wget to proceed, but neither is available on this system."
-        fi
-
-        ask=0
-        if [ -s "${tmp}" ]
-            then
-            progress "Running downloaded script to detect required packages..."
-            run ${sudo} "${bash}" "${tmp}" ${PACKAGES_INSTALLER_OPTIONS} || ask=1
-            rm "${tmp}"
-        else
-            rm "${tmp}"
-            fatal "Downloaded script is empty..."
-        fi
-
-        if [ "${ask}" = "1" ]
-            then
-            echo >&2 "It failed to install all the required packages, but I can try to install netdata."
-            read -p "Press ENTER to continue to netdata installation > "
-            progress "OK, let's give it a try..."
-        fi
-    else
-        echo >&2 "WARNING"
-        echo >&2 "Cannot detect the packages to be installed on this system without BASH v4+."
-        echo >&2 "We can only attempt to install netdata..."
-        echo >&2
-    fi
-else
-    echo >&2 "WARNING"
-    echo >&2 "Cannot detect the packages to be installed on a ${SYSTEM} - ${OS} system."
-    echo >&2 "We can only attempt to install netdata..."
-    echo >&2
-fi
-
-
-# ---------------------------------------------------------------------------------------------------------------------
-# download netdata source
-
-# this has to be checked after we have installed the required packages
-git="$(which_cmd git)"
-
-NETDATA_SOURCE_DIR=
-if [ ! -z "${git}" -a -x "${git}" ]
-then
-    [ ! -d "${SOURCE_DST}" ] && run ${sudo} mkdir -p "${SOURCE_DST}"
-
-    if [ ! -d "${SOURCE_DST}/netdata.git" ]
-        then
-        progress "Downloading netdata source code..."
-        run ${sudo} ${git} clone https://github.com/netdata/netdata.git "${SOURCE_DST}/netdata.git" || fatal "Cannot download netdata source"
-        cd "${SOURCE_DST}/netdata.git" || fatal "Cannot cd to netdata source tree"
-    else
-        progress "Updating netdata source code..."
-        cd "${SOURCE_DST}/netdata.git" || fatal "Cannot cd to netdata source tree"
-        run ${sudo} ${git} fetch --all || fatal "Cannot fetch netdata source updates"
-        run ${sudo} ${git} reset --hard origin/master || fatal "Cannot update netdata source tree"
-    fi
-    NETDATA_SOURCE_DIR="${SOURCE_DST}/netdata.git"
-else
-    fatal "Cannot find the command 'git' to download the netdata source code."
-fi
-
-
-# ---------------------------------------------------------------------------------------------------------------------
-# install netdata from source
-
-if [ ! -z "${NETDATA_SOURCE_DIR}" -a -d "${NETDATA_SOURCE_DIR}" ]
-then
-    cd "${NETDATA_SOURCE_DIR}" || fatal "Cannot cd to netdata source tree"
-
-    install=0
-    if [ -x netdata-updater.sh ]
-        then
-        # attempt to run the updater, to respect any compilation settings already in place
-        progress "Re-installing netdata..."
-        run ${sudo} ./netdata-updater.sh -f || install=1
-    else
-        install=1
-    fi
-
-    if [ "${install}" = "1" ]
-        then
-        if [ -x netdata-installer.sh ]
-            then
-            progress "Installing netdata..."
-            run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} "${@}" || \
-                fatal "netdata-installer.sh exited with error"
-        else
-            fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh)."
-        fi
-    fi
-else
-    fatal "Cannot install netdata from source on this system (cannot download the source code)."
-fi
diff --git a/libnetdata/Makefile.in b/libnetdata/Makefile.in
deleted file mode 100644
index 4ee89a69b..000000000
--- a/libnetdata/Makefile.in
+++ /dev/null
@@ -1,664 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
- -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - 
installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = 
@OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - adaptive_resortable_list \ - avl \ - buffer \ - clocks \ - config \ - dictionary \ - eval \ - locks \ - log \ - popen \ - procfile \ - simple_pattern \ - socket \ - statistical \ - storage_number \ - threads \ - url \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' 
cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/README.md b/libnetdata/README.md index 545f95984..9892d6703 100644 --- a/libnetdata/README.md +++ b/libnetdata/README.md @@ -4,3 +4,5 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/adaptive_resortable_list/Makefile.in b/libnetdata/adaptive_resortable_list/Makefile.in deleted file mode 100644 index 229511083..000000000 --- a/libnetdata/adaptive_resortable_list/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/adaptive_resortable_list -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = 
@CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/adaptive_resortable_list/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md index 0ba3ec9b5..ab0d7c5a8 100644 --- a/libnetdata/adaptive_resortable_list/README.md +++ b/libnetdata/adaptive_resortable_list/README.md @@ -91,3 +91,5 @@ Compared to unoptimized code (test No 1: 4.6sec): Do not use ARL if the a name/keyword may appear more than once in the source data. 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fadaptive_resortable_list%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/avl/Makefile.in b/libnetdata/avl/Makefile.in deleted file mode 100644 index 5982eb85b..000000000 --- a/libnetdata/avl/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/avl -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - 
$(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = 
@abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/avl/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/avl/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md index 48212a715..c4c72ff44 100644 --- a/libnetdata/avl/README.md +++ b/libnetdata/avl/README.md @@ -8,4 +8,5 @@ use any memory allocations and their memory footprint is optimized (by eliminating non-necessary data members). In addition to the above, this version of AVL, provides versions using locks -and traversal functions. \ No newline at end of file +and traversal functions. +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Favl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/buffer/Makefile.in b/libnetdata/buffer/Makefile.in deleted file mode 100644 index 21bbd3cc9..000000000 --- a/libnetdata/buffer/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/buffer -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/buffer/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/buffer/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md index a7cfef89d..48072e96e 100644 --- a/libnetdata/buffer/README.md +++ b/libnetdata/buffer/README.md @@ -8,4 +8,5 @@ Also, they are super fast in printing and appending data to the string and its ` is just a lookup (it does not traverse the string). Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or -to backend databases. \ No newline at end of file +to backend databases. 
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fbuffer%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/clocks/Makefile.in b/libnetdata/clocks/Makefile.in deleted file mode 100644 index 118d60317..000000000 --- a/libnetdata/clocks/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/clocks -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - 
$(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = 
@abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/clocks/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/clocks/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md index e69de29bb..c4215a755 100644 --- a/libnetdata/clocks/README.md +++ b/libnetdata/clocks/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fclocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/config/Makefile.in b/libnetdata/config/Makefile.in deleted file mode 100644 index 940ccb8d8..000000000 --- a/libnetdata/config/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/config -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/config/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/config/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md index 5e170caa9..f0a27d951 100644 --- a/libnetdata/config/README.md +++ b/libnetdata/config/README.md @@ -44,3 +44,5 @@ Last, what about options you believe you have set, but you misspelled? When you get the configuration file from the server, there will be a comment above all `name = value` pairs the server does not use. So you know that whatever you wrote there, is not used. 
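To make the behavior described above concrete, a hypothetical fragment of a `netdata.conf` downloaded from a running server might look like the following (the exact comment wording is illustrative, not the literal string netdata emits):

```
[global]
	# option 'hostnme' is not used (misspelled; the server never reads it)
	hostnme = myserver

	hostname = myserver
```

Any `name = value` pair carrying such a comment was parsed from the file but never requested by the server, which is the quickest way to spot a typo in an option name.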
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c index 411538446..9e6a0c02c 100644 --- a/libnetdata/config/appconfig.c +++ b/libnetdata/config/appconfig.c @@ -538,6 +538,7 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) || !strcmp(co->name, CONFIG_SECTION_WEB) || !strcmp(co->name, CONFIG_SECTION_STATSD) || !strcmp(co->name, CONFIG_SECTION_PLUGINS) + || !strcmp(co->name, CONFIG_SECTION_CLOUD) || !strcmp(co->name, CONFIG_SECTION_REGISTRY) || !strcmp(co->name, CONFIG_SECTION_HEALTH) || !strcmp(co->name, CONFIG_SECTION_BACKEND) diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h index 6ac666d8b..78099aad4 100644 --- a/libnetdata/config/appconfig.h +++ b/libnetdata/config/appconfig.h @@ -86,6 +86,7 @@ #define CONFIG_SECTION_WEB "web" #define CONFIG_SECTION_STATSD "statsd" #define CONFIG_SECTION_PLUGINS "plugins" +#define CONFIG_SECTION_CLOUD "cloud" #define CONFIG_SECTION_REGISTRY "registry" #define CONFIG_SECTION_HEALTH "health" #define CONFIG_SECTION_BACKEND "backend" diff --git a/libnetdata/dictionary/Makefile.in b/libnetdata/dictionary/Makefile.in deleted file mode 100644 index bafd64a8a..000000000 --- a/libnetdata/dictionary/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/dictionary -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/dictionary/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md index e69de29bb..9c705227a 100644 --- a/libnetdata/dictionary/README.md +++ b/libnetdata/dictionary/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fdictionary%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c index dd94a801d..cfcf1fbab 100644 --- a/libnetdata/dictionary/dictionary.c +++ b/libnetdata/dictionary/dictionary.c @@ -292,3 +292,38 @@ int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data return ret; } + +static int dictionary_walker_name_value(avl *a, int (*callback)(char *name, void *entry, void *data), void *data) { + int total = 0, ret = 0; + + if(a->avl_link[0]) { + ret = dictionary_walker_name_value(a->avl_link[0], callback, data); + if(ret < 0) return ret; + total += ret; + } + + ret = callback(((NAME_VALUE *)a)->name, ((NAME_VALUE *)a)->value, data); + if(ret < 0) return ret; + total += ret; + + if(a->avl_link[1]) { + ret = dictionary_walker_name_value(a->avl_link[1], callback, data); + if (ret < 0) return ret; + total += ret; + } + + return total; +} + +int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *data), void *data) { + int ret = 0; + + dictionary_read_lock(dict); + + if(likely(dict->values_index.root)) + ret = dictionary_walker_name_value(dict->values_index.root, callback, data); + + dictionary_unlock(dict); + + return ret; +} diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h index 61b9bfc61..9be261eb2 100644 --- a/libnetdata/dictionary/dictionary.h +++ b/libnetdata/dictionary/dictionary.h @@ -44,5 +44,6 @@ extern void *dictionary_get(DICTIONARY *dict, const char *name); extern int dictionary_del(DICTIONARY *dict, const char *name); extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data); +extern int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *d), void *data); #endif /* NETDATA_DICTIONARY_H */ diff --git a/libnetdata/eval/Makefile.in b/libnetdata/eval/Makefile.in deleted file mode 100644 index 0b8341b03..000000000 --- a/libnetdata/eval/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/eval -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W 
= @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ 
-psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/eval/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/eval/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT:
diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md
index e69de29bb..e7b4579a9 100644
--- a/libnetdata/eval/README.md
+++ b/libnetdata/eval/README.md
@@ -0,0 +1,2 @@
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Feval%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
index de330cae8..095c38c25 100644
--- a/libnetdata/libnetdata.c
+++ b/libnetdata/libnetdata.c
@@ -50,7 +50,7 @@ static inline void print_allocations(const char *file, const char *function, con
             type, size
     );
-    fprintf(stderr, "%s iteration %zu MEMORY ALLOCATIONS: (%04lu@%-40.40s:%-40.40s): Allocated %zd KB (%+zd B), mmapped %zd KB (%+zd B): %s : malloc %zd (%+zd), calloc %zd (%+zd), realloc %zd (%+zd), strdup %zd (%+zd), free %zd (%+zd)\n",
+    fprintf(stderr, "%s iteration %zu MEMORY ALLOCATIONS: (%04lu@%-40.40s:%-40.40s): Allocated %zd KiB (%+zd B), mmapped %zd KiB (%+zd B): %s : malloc %zd (%+zd), calloc %zd (%+zd), realloc %zd (%+zd), strdup %zd (%+zd), free %zd (%+zd)\n",
             netdata_thread_tag(),
             log_thread_memory_allocations,
             line, file, function,
@@ -1381,17 +1381,19 @@ void recursive_config_double_dir_load(const char *user_path, const char *stock_p
             }
         }
 
-        if(de->d_type == DT_REG || de->d_type == DT_LNK) {
+        if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
             size_t len = strlen(de->d_name);
             if(path_is_file(udir, de->d_name) && len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
                 char *filename = strdupz_path_subpath(udir, de->d_name);
+                debug(D_HEALTH, "CONFIG calling callback for user file '%s'", filename);
                 callback(filename, data);
                 freez(filename);
+                continue;
             }
-            else
-                debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s'", udir, de->d_name);
         }
+
+        debug(D_HEALTH, "CONFIG ignoring user-config file '%s/%s' of type %d", udir, de->d_name, (int)de->d_type);
     }
 
     closedir(dir);
@@ -1426,22 +1428,27 @@ void recursive_config_double_dir_load(const char *user_path, const char *stock_p
             }
         }
 
-        if(de->d_type == DT_REG || de->d_type == DT_LNK) {
+        if(de->d_type == DT_UNKNOWN || de->d_type == DT_REG || de->d_type == DT_LNK) {
             size_t len = strlen(de->d_name);
             if(path_is_file(sdir, de->d_name) && !path_is_file(udir, de->d_name) && len > 5 && !strcmp(&de->d_name[len - 5], ".conf")) {
                 char *filename = strdupz_path_subpath(sdir, de->d_name);
+                debug(D_HEALTH, "CONFIG calling callback for stock file '%s'", filename);
                 callback(filename, data);
                 freez(filename);
+                continue;
             }
-            else
-                debug(D_HEALTH, "CONFIG ignoring stock config file '%s/%s'", sdir, de->d_name);
+        }
+
+        debug(D_HEALTH, "CONFIG ignoring stock-config file '%s/%s' of type %d", sdir, de->d_name, (int)de->d_type);
     }
 
         closedir(dir);
     }
 
+    debug(D_HEALTH, "CONFIG done traversing user-config directory '%s', stock config directory '%s'", udir, sdir);
+
     freez(udir);
     freez(sdir);
 }
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index 0bac3dc8f..8d9be336b 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -283,8 +283,8 @@ extern void recursive_config_double_dir_load(
 extern void netdata_cleanup_and_exit(int ret) NORETURN;
 
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
 extern char *netdata_configured_host_prefix;
-
 #include "os.h"
 #include "storage_number/storage_number.h"
 #include "threads/threads.h"
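The two libnetdata.c hunks above make the config-directory scan tolerant of readdir() returning DT_UNKNOWN, which filesystems such as XFS are allowed to do (d_type is an optional hint, not guaranteed by POSIX); the authoritative file/symlink check is deferred to path_is_file(), which stats the entry. They also preserve the user-over-stock override: the stock loop skips any name that already exists in the user directory. A minimal standalone sketch of the same pattern, with plain stat() standing in for netdata's path_is_file(); the helper name is_regular_file, the printf, and the "/etc/netdata" path are illustrative only:

// Sketch only: mirrors the patched loop's logic with plain POSIX calls.
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

// Return 1 if dir/name is a regular file (after following symlinks).
static int is_regular_file(const char *dir, const char *name) {
    char path[4096];
    struct stat st;
    snprintf(path, sizeof(path), "%s/%s", dir, name);
    return stat(path, &st) == 0 && S_ISREG(st.st_mode);
}

static void load_conf_files(const char *dir) {
    DIR *d = opendir(dir);
    if(!d) return;

    struct dirent *de;
    while((de = readdir(d))) {
        // d_type may legitimately be DT_UNKNOWN, so it can only be used
        // as a fast path to skip entries, never to reject DT_UNKNOWN ones.
        if(de->d_type != DT_UNKNOWN && de->d_type != DT_REG && de->d_type != DT_LNK)
            continue;

        size_t len = strlen(de->d_name);
        if(len > 5 && !strcmp(&de->d_name[len - 5], ".conf") && is_regular_file(dir, de->d_name))
            printf("would load '%s/%s'\n", dir, de->d_name);
    }
    closedir(d);
}

int main(void) {
    // In the patched function, a stock directory would be scanned next,
    // skipping names already found here (user files shadow stock files).
    load_conf_files("/etc/netdata");
    return 0;
}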
diff --git
a/libnetdata/locks/Makefile.in b/libnetdata/locks/Makefile.in deleted file mode 100644 index 38520f78e..000000000 --- a/libnetdata/locks/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/locks -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h 
-CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = 
@build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/locks/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/locks/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md index e69de29bb..0f01e8c56 100644 --- a/libnetdata/locks/README.md +++ b/libnetdata/locks/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flocks%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/log/Makefile.in b/libnetdata/log/Makefile.in deleted file mode 100644 index abf812b67..000000000 --- a/libnetdata/log/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/log -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = 
@CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ 
-psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/log/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/log/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT:
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
index e69de29bb..28e3c3f72 100644
--- a/libnetdata/log/README.md
+++ b/libnetdata/log/README.md
@@ -0,0 +1,2 @@
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Flog%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c
index 198e98bd9..66a923f85 100644
--- a/libnetdata/log/log.c
+++ b/libnetdata/log/log.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+#include
 #include "../libnetdata.h"
 
 int web_server_is_multithreaded = 1;
@@ -376,6 +377,8 @@ void error_int( const char *prefix, const char *file, const char *function, cons
 }
 
 void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+    // save a copy of errno - just in case this function generates a new error
+    int __errno = errno;
     va_list args;
 
     if(error_log_syslog) {
@@ -400,6 +403,12 @@ void fatal_int( const char *file, const char *function, const unsigned long line
 
     log_unlock();
 
+    char action_data[70+1];
+    snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, __errno);
+    char action_result[60+1];
+    snprintfz(action_result, 60, "%s:%s", program_name, netdata_thread_tag());
+    send_statistics("FATAL", action_result, action_data);
+
     netdata_cleanup_and_exit(1);
 }
diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h
index 48e1599a7..44670f31c 100644
--- a/libnetdata/log/log.h
+++ b/libnetdata/log/log.h
@@ -85,6 +85,7 @@ static inline void debug_dummy(void) {}
 #define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
 #define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
 
+extern void send_statistics(const char *action, const char *action_result, const char *action_data);
 extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
 extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
 extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
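The fatal_int() hunk snapshots errno before doing anything else, because the locking and fprintf() calls that follow can overwrite it; the saved value is then folded into the anonymous-statistics payload via send_statistics(). (The patch's __errno name works in practice, but identifiers starting with two underscores are formally reserved in C, so the sketch below uses saved_errno.) A self-contained sketch of that pattern, with a stubbed send_statistics() and plain snprintf() in place of netdata's snprintfz(); everything here beyond the field widths and the send_statistics() signature taken from the hunks above is illustrative:

// Sketch of the errno-snapshot pattern used by the patched fatal_int().
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Stub: in netdata this posts the anonymous telemetry event.
static void send_statistics(const char *action, const char *action_result, const char *action_data) {
    fprintf(stderr, "telemetry: %s %s %s\n", action, action_result, action_data);
}

static void fatal_like(const char *file, const char *function, unsigned long line, const char *msg) {
    // Snapshot errno first: the fprintf() below may clobber it.
    int saved_errno = errno;

    fprintf(stderr, "FATAL: %s (errno %d, %s)\n", msg, saved_errno, strerror(saved_errno));

    // Same layout and width limits as the patched code: line@file:function/errno.
    char action_data[70 + 1];
    snprintf(action_data, sizeof(action_data), "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno);
    send_statistics("FATAL", "example:main", action_data);

    exit(1);
}

int main(void) {
    errno = 0;
    fatal_like(__FILE__, "main", (unsigned long)__LINE__, "example fatal condition");
}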
diff --git a/libnetdata/popen/Makefile.in b/libnetdata/popen/Makefile.in
deleted file mode 100644
index 0a699332f..000000000
--- a/libnetdata/popen/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
-  case $${target_option-} in \
-    ?)
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/popen -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W 
= @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ 
-psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/popen/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/popen/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md index e69de29bb..5a83b6022 100644 --- a/libnetdata/popen/README.md +++ b/libnetdata/popen/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fpopen%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/procfile/Makefile.in b/libnetdata/procfile/Makefile.in deleted file mode 100644 index 643e00ba1..000000000 --- a/libnetdata/procfile/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/procfile -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 
\ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS 
= @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/procfile/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/procfile/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md index 279885f93..7037dc4ee 100644 --- a/libnetdata/procfile/README.md +++ b/libnetdata/procfile/README.md @@ -59,3 +59,5 @@ To achieve this kind of performance, the library tries to work in batches so tha and the data are inside the processor's caches. This library is extensively used in netdata and its plugins. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fprocfile%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/simple_pattern/Makefile.in b/libnetdata/simple_pattern/Makefile.in deleted file mode 100644 index 575850f03..000000000 --- a/libnetdata/simple_pattern/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/simple_pattern -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
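# Editor's note (illustrative, not part of the generated file being removed):
# every "NAME = @NAME@" assignment in this block is an autoconf placeholder
# that ./config.status rewrites when the tree is configured; a line such as
# "CC = @CC@" would typically become "CC = gcc". The concrete value shown is
# an assumption for illustration only, not output of this tree's configure run.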
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/simple_pattern/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md index 22ccf373a..79a713168 100644 --- a/libnetdata/simple_pattern/README.md +++ b/libnetdata/simple_pattern/README.md @@ -34,3 +34,5 @@ netdata stops processing to the first positive or negative match (left to right). If it is not matched by either positive or negative patterns, it is denied at the end. 
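(A brief illustration, not part of the upstream patch: given the first-match-wins rule described in the README text above, a hypothetical pattern list such as

    !*test*  dev-*

denies "dev-test-1" because "!*test*" matches first, allows "dev-web-1" via "dev-*", and denies an unmatched name like "db-1" at the end. All three names are made up for illustration.)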
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsimple_pattern%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/socket/Makefile.in b/libnetdata/socket/Makefile.in deleted file mode 100644 index 45f13d068..000000000 --- a/libnetdata/socket/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/socket -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - 
$(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = 
@abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/socket/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/socket/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md index e69de29bb..e42756075 100644 --- a/libnetdata/socket/README.md +++ b/libnetdata/socket/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fsocket%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c index c266efeb4..6b0b3b674 100644 --- a/libnetdata/socket/socket.c +++ b/libnetdata/socket/socket.c @@ -248,7 +248,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p return sock; } -static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port) { +static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port, int acl_flags) { if(sockets->opened >= MAX_LISTEN_FDS) { error("LISTENER: Too many listening sockets. 
Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype); close(fd); @@ -259,6 +259,7 @@ static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family sockets->fds_types[sockets->opened] = socktype; sockets->fds_families[sockets->opened] = family; sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port); + sockets->fds_acl_flags[sockets->opened] = acl_flags; sockets->opened++; return 0; @@ -300,8 +301,20 @@ void listen_sockets_close(LISTEN_SOCKETS *sockets) { sockets->failed = 0; } +WEB_CLIENT_ACL read_acl(char *st) { + if (!strcmp(st,"dashboard")) return WEB_CLIENT_ACL_DASHBOARD; + if (!strcmp(st,"registry")) return WEB_CLIENT_ACL_REGISTRY; + if (!strcmp(st,"badges")) return WEB_CLIENT_ACL_BADGE; + if (!strcmp(st,"management")) return WEB_CLIENT_ACL_MGMT; + if (!strcmp(st,"streaming")) return WEB_CLIENT_ACL_STREAMING; + if (!strcmp(st,"netdata.conf")) return WEB_CLIENT_ACL_NETDATACONF; + return WEB_CLIENT_ACL_NONE; +} + static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) { int added = 0; + WEB_CLIENT_ACL acl_flags = WEB_CLIENT_ACL_NONE; + struct addrinfo hints; struct addrinfo *result = NULL, *rp = NULL; @@ -311,7 +324,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, char buffer2[10 + 1]; snprintfz(buffer2, 10, "%d", default_port); - char *ip = buffer, *port = buffer2, *interface = "";; + char *ip = buffer, *port = buffer2, *interface = "", *portconfig;; int protocol = IPPROTO_TCP, socktype = SOCK_STREAM; const char *protocol_str = "tcp"; @@ -332,14 +345,13 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, char *path = ip + 5; socktype = SOCK_STREAM; protocol_str = "unix"; - int fd = create_listen_socket_unix(path, listen_backlog); if (fd == -1) { error("LISTENER: Cannot create unix socket '%s'", path); sockets->failed++; - } - else { - listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0); + } else { + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; + listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0, acl_flags); added++; } return added; @@ -355,19 +367,40 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, } } else { - while(*e && *e != ':' && *e != '%') e++; + while(*e && *e != ':' && *e != '%' && *e != '=') e++; } if(*e == '%') { *e = '\0'; e++; interface = e; - while(*e && *e != ':') e++; + while(*e && *e != ':' && *e != '=') e++; } if(*e == ':') { port = e + 1; *e = '\0'; + e++; + while(*e && *e != '=') e++; + } + + if(*e == '=') { + *e='\0'; + e++; + portconfig = e; + while (*e != '\0') { + if (*e == '|') { + *e = '\0'; + acl_flags |= read_acl(portconfig); + e++; + portconfig = e; + continue; + } + e++; + } + acl_flags |= read_acl(portconfig); + } else { + acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING; } uint32_t scope_id = 0; @@ -435,7 +468,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, sockets->failed++; } else { - listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport); + listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport, acl_flags); 
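/*
 * Editor's note (illustrative, not part of the upstream patch): with the
 * parsing added to bind_to_this() above, a "bind to" definition may now
 * carry an optional "=acl|acl|..." suffix per socket, for example:
 *
 *     bind to = 127.0.0.1:19999=dashboard|registry unix:/tmp/netdata.sock
 *
 * read_acl() maps each token ("dashboard", "registry", "badges",
 * "management", "streaming", "netdata.conf") to one WEB_CLIENT_ACL bit;
 * an unrecognized token yields WEB_CLIENT_ACL_NONE. When no "=" suffix is
 * present (and for unix sockets), acl_flags is set to all bits, preserving
 * the pre-existing allow-everything behaviour. The address and socket path
 * above are example values only.
 */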
added++; } } @@ -975,6 +1008,7 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien inline POLLINFO *poll_add_fd(POLLJOB *p , int fd , int socktype + , WEB_CLIENT_ACL port_acl , uint32_t flags , const char *client_ip , const char *client_port @@ -1013,6 +1047,8 @@ inline POLLINFO *poll_add_fd(POLLJOB *p p->inf[i].slot = (size_t)i; p->inf[i].flags = 0; p->inf[i].socktype = -1; + p->inf[i].port_acl = -1; + p->inf[i].client_ip = NULL; p->inf[i].client_port = NULL; p->inf[i].del_callback = p->del_callback; @@ -1042,6 +1078,7 @@ inline POLLINFO *poll_add_fd(POLLJOB *p pi->fd = fd; pi->p = p; pi->socktype = socktype; + pi->port_acl = port_acl; pi->flags = flags; pi->next = NULL; pi->client_ip = strdupz(client_ip); @@ -1230,7 +1267,7 @@ static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, sho #ifdef NETDATA_INTERNAL_CHECKS // this is common - it is used for web server file copies if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) { - error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"", pi->client_port?pi->client_port:""); + error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from %s port %s was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"", pi->client_port?pi->client_port:""); //poll_close_fd(pi); //return; } @@ -1272,6 +1309,7 @@ static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, sho poll_add_fd(p , nfd , SOCK_STREAM + , pi->port_acl , POLLINFO_FLAG_CLIENT_SOCKET , client_ip , client_port @@ -1331,7 +1369,7 @@ static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, sho #ifdef NETDATA_INTERNAL_CHECKS // this is common - it is used for streaming if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) { - error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"", pi->client_port?pi->client_port:""); + error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from %s port %s was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"", pi->client_port?pi->client_port:""); //poll_close_fd(pi); //return; } @@ -1414,6 +1452,7 @@ void poll_events(LISTEN_SOCKETS *sockets POLLINFO *pi = poll_add_fd(&p , sockets->fds[i] , sockets->fds_types[i] + , sockets->fds_acl_flags[i] , POLLINFO_FLAG_SERVER_SOCKET , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN" , "" @@ -1457,7 +1496,7 @@ void poll_events(LISTEN_SOCKETS *sockets } usec_t dt_usec = next_timer_usec - now_usec; - if(dt_usec > 1000 * USEC_PER_MS) + if(dt_usec < 1000 * USEC_PER_MS) timeout_ms = 1000; else timeout_ms = (int)(dt_usec / USEC_PER_MS); @@ -1503,7 +1542,7 @@ void poll_events(LISTEN_SOCKETS *sockets if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) { if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) { - info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' has not sent a complete request in %zu seconds - closing it. " + info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s has not sent a complete request in %zu seconds - closing it. " , i , pi->fd , pi->client_ip ? 
pi->client_ip : "" @@ -1513,7 +1552,7 @@ void poll_events(LISTEN_SOCKETS *sockets poll_close_fd(pi); } else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) { - info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' is idle for more than %zu seconds - closing it. " + info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s is idle for more than %zu seconds - closing it. " , i , pi->fd , pi->client_ip ? pi->client_ip : "" diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h index f5412b63d..c69d4897f 100644 --- a/libnetdata/socket/socket.h +++ b/libnetdata/socket/socket.h @@ -9,6 +9,24 @@ #define MAX_LISTEN_FDS 50 #endif +typedef enum web_client_acl { + WEB_CLIENT_ACL_NONE = 0, + WEB_CLIENT_ACL_NOCHECK = 0, + WEB_CLIENT_ACL_DASHBOARD = 1 << 0, + WEB_CLIENT_ACL_REGISTRY = 1 << 1, + WEB_CLIENT_ACL_BADGE = 1 << 2, + WEB_CLIENT_ACL_MGMT = 1 << 3, + WEB_CLIENT_ACL_STREAMING = 1 << 4, + WEB_CLIENT_ACL_NETDATACONF = 1 << 5 +} WEB_CLIENT_ACL; + +#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD) +#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY) +#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE) +#define web_client_can_access_mgmt(w) ((w)->acl & WEB_CLIENT_ACL_MGMT) +#define web_client_can_access_stream(w) ((w)->acl & WEB_CLIENT_ACL_STREAMING) +#define web_client_can_access_netdataconf(w) ((w)->acl & WEB_CLIENT_ACL_NETDATACONF) + typedef struct listen_sockets { struct config *config; // the config file to use const char *config_section; // the netdata configuration section to read settings from @@ -22,6 +40,7 @@ typedef struct listen_sockets { char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM) int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6) + WEB_CLIENT_ACL fds_acl_flags[MAX_LISTEN_FDS]; // the acl to apply to the open sockets (dashboard, badges, streaming, netdata.conf, management) } LISTEN_SOCKETS; extern char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port); @@ -73,6 +92,7 @@ typedef struct pollinfo { int fd; // the file descriptor int socktype; // the client socket type + WEB_CLIENT_ACL port_acl; // the access lists permitted on this web server port (it's -1 for client sockets) char *client_ip; // the connected client IP char *client_port; // the connected client port @@ -138,6 +158,7 @@ extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *da extern POLLINFO *poll_add_fd(POLLJOB *p , int fd , int socktype + , WEB_CLIENT_ACL port_acl , uint32_t flags , const char *client_ip , const char *client_port diff --git a/libnetdata/statistical/Makefile.in b/libnetdata/statistical/Makefile.in deleted file mode 100644 index 8209a1ecb..000000000 --- a/libnetdata/statistical/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/statistical -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) 
>/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ 
-htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/statistical/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/statistical/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
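Every `Makefile.in` removed in these hunks is automake output generated from the neighbouring `Makefile.am`, so nothing is lost from the tree: a build from the repository recreates them. A minimal sketch of that regeneration, assuming autoconf and automake are installed (the flags shown are the common ones, not anything this patch mandates):

```sh
# Recreate aclocal.m4, configure and every Makefile.in from their sources,
# then let configure instantiate the per-directory Makefiles.
autoreconf -ivf   # -i install missing helpers, -v verbose, -f force rebuild
./configure
make
```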
-.NOEXPORT: diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md index e69de29bb..184f82b4d 100644 --- a/libnetdata/statistical/README.md +++ b/libnetdata/statistical/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstatistical%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/storage_number/Makefile.in b/libnetdata/storage_number/Makefile.in deleted file mode 100644 index 1ab5afd87..000000000 --- a/libnetdata/storage_number/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/storage_number -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - 
$(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ 
-SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/storage_number/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
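The `Makefile: Makefile.in config.status` rule that each of these generated files carries lets a single directory's Makefile be rebuilt without re-running the whole configure script. The same thing can be done by hand; a sketch, where the build-tree path and target path are examples only:

```sh
# From the directory where configure was originally run, regenerate just one
# Makefile from its Makefile.in using the cached configuration results.
cd /path/to/build-tree            # example location, not fixed by the patch
./config.status libnetdata/storage_number/Makefile
```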
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md index 206c42b87..5c2c0b074 100644 --- a/libnetdata/storage_number/README.md +++ b/libnetdata/storage_number/README.md @@ -8,3 +8,5 @@ with a precision of 0.00001 (yes, it's a floating point number, meaning that hig have less decimal precision) and 3 bits for flags. This provides an extremely optimized memory footprint with just 0.0001% max accuracy loss. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fstorage_number%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/threads/Makefile.in b/libnetdata/threads/Makefile.in deleted file mode 100644 index 0a4460bcc..000000000 --- a/libnetdata/threads/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/threads -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/threads/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/threads/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
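A quick sanity check on the `libnetdata/storage_number/README.md` hunk earlier, which pairs a fixed `0.00001` resolution with a `0.0001%` maximum accuracy loss: at a representative stored value of `10.0` (an illustrative figure, not taken from the README), one step of 0.00001 is exactly one part in a million, i.e. 0.0001%:

```sh
# Relative error of one 0.00001 step at a stored value of 10.0:
# 0.00001 / 10.0 = 1e-6, printed as a percentage -> 0.0001%.
awk 'BEGIN { printf "relative error near 10.0: %.4f%%\n", 0.00001 / 10.0 * 100 }'
```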
-.NOEXPORT: diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md index e69de29bb..9f98ba16e 100644 --- a/libnetdata/threads/README.md +++ b/libnetdata/threads/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Fthreads%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/libnetdata/url/Makefile.in b/libnetdata/url/Makefile.in deleted file mode 100644 index 8ccc4fd64..000000000 --- a/libnetdata/url/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = libnetdata/url -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - 
$(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = 
@UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu libnetdata/url/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu libnetdata/url/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md index e69de29bb..7562ffb7f 100644 --- a/libnetdata/url/README.md +++ b/libnetdata/url/README.md @@ -0,0 +1,2 @@ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Flibnetdata%2Furl%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/makeself/Makefile.am b/makeself/Makefile.am deleted file mode 100644 index f6f9167a0..000000000 --- a/makeself/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_noinst_DATA = \ - $(NULL) - -dist_noinst_SCRIPTS = \ - build.sh \ - makeself.sh \ - makeself-license.txt \ - install-alpine-packages.sh \ - post-installer.sh \ - jobs/10-prepare-destination.install.sh \ - jobs/50-curl-7.60.0.install.sh \ - jobs/50-bash-4.4.18.install.sh \ - jobs/50-fping-4.0.install.sh \ - jobs/70-netdata-git.install.sh \ - jobs/99-makeself.install.sh \ - run-all-jobs.sh \ - install-or-update.sh \ - build-x86_64-static.sh \ - makeself-header.sh \ - makeself-help-header.txt \ - makeself.lsm \ - functions.sh \ - $(NULL) diff --git a/makeself/Makefile.in b/makeself/Makefile.in deleted file mode 100644 index 8962dbbc1..000000000 --- a/makeself/Makefile.in +++ /dev/null @@ -1,485 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
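The `makeself/Makefile.am` removed above enumerates numbered job scripts, `10-prepare-destination` through `99-makeself`, which `run-all-jobs.sh` dispatches. A minimal sketch of that numeric-prefix runner pattern (the deleted script itself is not reproduced in this patch, so treat this as an approximation):

```sh
# Execute the jobs in lexical order: 10-... prepares the destination,
# the 50-... jobs build static curl, bash and fping, 70-... builds netdata
# from git, and 99-... packs everything into the self-extracting installer.
for job in jobs/[0-9][0-9]-*.install.sh; do
    sh "${job}" || exit 1
done
```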
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = makeself -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) 
>/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ 
-htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - $(NULL) - -dist_noinst_SCRIPTS = \ - build.sh \ - makeself.sh \ - makeself-license.txt \ - install-alpine-packages.sh \ - post-installer.sh \ - jobs/10-prepare-destination.install.sh \ - jobs/50-curl-7.60.0.install.sh \ - jobs/50-bash-4.4.18.install.sh \ - jobs/50-fping-4.0.install.sh \ - jobs/70-netdata-git.install.sh \ - jobs/99-makeself.install.sh \ - run-all-jobs.sh \ - install-or-update.sh \ - build-x86_64-static.sh \ - makeself-header.sh \ - makeself-help-header.txt \ - makeself.lsm \ - functions.sh \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu makeself/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu makeself/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/makeself/build-x86_64-static.sh b/makeself/build-x86_64-static.sh deleted file mode 100755 index b0902512c..000000000 --- a/makeself/build-x86_64-static.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -. 
$(dirname "$0")/../installer/functions.sh || exit 1 - -set -e - -DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine37" - -if ! sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 -then - # To run interactively: - # sudo docker run -it netdata-package-x86_64-static /bin/sh - # (add -v host-dir:guest-dir:rw arguments to mount volumes) - # - # To remove images in order to re-create: - # sudo docker rm -v $(sudo docker ps -a -q -f status=exited) - # sudo docker rmi netdata-package-x86_64-static - # - # This command maps the current directory to - # /usr/src/netdata.git - # inside the container and runs the script install-alpine-packages.sh - # (also inside the container) - # - run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.7 \ - /bin/sh /usr/src/netdata.git/makeself/install-alpine-packages.sh - - # save the changes made permanently - id=$(sudo docker ps -l -q) - run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}" -fi - -# Run the build script inside the container -run sudo docker run -a stdin -a stdout -a stderr -i -t -v \ - $(pwd):/usr/src/netdata.git:rw \ - "${DOCKER_CONTAINER_NAME}" \ - /bin/sh /usr/src/netdata.git/makeself/build.sh "${@}" - -if [ "${USER}" ] - then - sudo chown -R "${USER}" . -fi diff --git a/makeself/build.sh b/makeself/build.sh deleted file mode 100755 index e8c1c9ddc..000000000 --- a/makeself/build.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env sh -# SPDX-License-Identifier: GPL-3.0-or-later - -# ----------------------------------------------------------------------------- -# parse command line arguments - -export NETDATA_BUILD_WITH_DEBUG=0 - -while [ ! -z "${1}" ] -do - case "${1}" in - debug) - export NETDATA_BUILD_WITH_DEBUG=1 - ;; - - *) - ;; - esac - - shift -done - - -# ----------------------------------------------------------------------------- - -# First run install-alpine-packages.sh under alpine linux to install -# the required packages. build-x86_64-static.sh will do this for you -# using docker. - -cd $(dirname "$0") || exit 1 - -# if we don't run inside the netdata repo -# download it and run from it -if [ ! -f ../netdata-installer.sh ] -then - git clone https://github.com/netdata/netdata.git netdata.git || exit 1 - cd netdata.git/makeself || exit 1 - ./build.sh "$@" - exit $? -fi - -cat >&2 < " - -if [ ! -d tmp ] - then - mkdir tmp || exit 1 -fi - -./run-all-jobs.sh "$@" -exit $? diff --git a/makeself/functions.sh b/makeself/functions.sh deleted file mode 100755 index 10b324deb..000000000 --- a/makeself/functions.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -# ----------------------------------------------------------------------------- - -# allow running the jobs by hand -[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0 -[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}" -[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/.." -[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" -[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.." 
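The block above lets each packaging job run standalone: every variable only receives a default when it is not already set, and each job script sources this functions.sh itself. A minimal sketch of running a single job by hand, assuming the makeself/jobs/ scripts from this patch are in place (the first argument becomes NETDATA_INSTALL_PATH, defaulting to /opt/netdata):

    # hypothetical manual invocation of one packaging job, from the source tree
    cd makeself
    ./jobs/10-prepare-destination.install.sh /opt/netdata   # prepares the target tree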
-export NULL= - -# make sure the path does not end with / -if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ] - then - export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}" -fi - -# find the parent directory -export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")" - -# ----------------------------------------------------------------------------- - -# bash strict mode -set -euo pipefail - -# ----------------------------------------------------------------------------- - -fetch() { - local dir="${1}" url="${2}" - local tar="${dir}.tar.gz" - - if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ] - then - run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}" - fi - - if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ] - then - cd "${NETDATA_MAKESELF_PATH}/tmp" - run tar -zxpf "${tar}" - cd - - fi - - run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}" -} - -# ----------------------------------------------------------------------------- - -# load the functions of the netdata-installer.sh -. "${NETDATA_SOURCE_PATH}/installer/functions.sh" - -# ----------------------------------------------------------------------------- - -# debug -echo "ME=${0}" -echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}" -echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}" -echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}" -echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}" -echo "PROCESSORS=${SYSTEM_CPUS}" diff --git a/makeself/install-alpine-packages.sh b/makeself/install-alpine-packages.sh deleted file mode 100755 index 695be4d4f..000000000 --- a/makeself/install-alpine-packages.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env sh -# SPDX-License-Identifier: GPL-3.0-or-later - -# this script should be run in Alpine Linux -# install the required packages -apk update -apk add --no-cache \ - bash \ - wget \ - curl \ - ncurses \ - git \ - netcat-openbsd \ - alpine-sdk \ - autoconf \ - automake \ - gcc \ - make \ - libtool \ - pkgconfig \ - util-linux-dev \ - openssl-dev \ - gnutls-dev \ - zlib-dev \ - libmnl-dev \ - libnetfilter_acct-dev \ - || exit 1 diff --git a/makeself/install-or-update.sh b/makeself/install-or-update.sh deleted file mode 100755 index bfcbe720a..000000000 --- a/makeself/install-or-update.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -. $(dirname "${0}")/functions.sh - -export LC_ALL=C -umask 002 - -# Be nice on production environments -renice 19 $$ >/dev/null 2>/dev/null - -# ----------------------------------------------------------------------------- - -STARTIT=1 - -while [ ! -z "${1}" ] -do - if [ "${1}" = "--dont-start-it" ] - then - STARTIT=0 - else - echo >&2 "Unknown option '${1}'. Ignoring it." - fi - shift -done - -deleted_stock_configs=0 -if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ] -then - - # ----------------------------------------------------------------------------- - progress "Deleting stock configuration files from user configuration directory" - - declare -A configs_signatures=() - source "system/configs.signatures" - - if [ ! -d etc/netdata ] - then - run mkdir -p etc/netdata - fi - - md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" - for x in $(find etc -type f) - do - # find its relative filename - f="${x/etc\/netdata\//}" - - # find the stock filename - t="${f/.conf.old/.conf}" - t="${t/.conf.orig/.conf}" - - if [ !
-z "${md5sum}" ] - then - # find the checksum of the existing file - md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)" - #echo >&2 "md5: ${md5}" - - # check if it matches - if [ "${configs_signatures[${md5}]}" = "${t}" ] - then - # it matches the default - run rm -f "${x}" - deleted_stock_configs=$(( deleted_stock_configs + 1 )) - fi - fi - done - - touch "etc/netdata/.installer-cleanup-of-stock-configs-done" -fi - -# ----------------------------------------------------------------------------- -progress "Add user netdata to required user groups" - -NETDATA_USER="root" -NETDATA_GROUP="root" -add_netdata_user_and_group "/opt/netdata" -if [ $? -eq 0 ] - then - NETDATA_USER="netdata" - NETDATA_GROUP="netdata" -else - run_failed "Failed to add netdata user and group" -fi - - -# ----------------------------------------------------------------------------- -progress "Check SSL certificates paths" - -if [ ! -f "/etc/ssl/certs/ca-certificates.crt" ] -then - if [ ! -f /opt/netdata/.curlrc ] - then - cacert= - - # CentOS - [ -f "/etc/ssl/certs/ca-bundle.crt" ] && cacert="/etc/ssl/certs/ca-bundle.crt" - - if [ ! -z "${cacert}" ] - then - echo "Creating /opt/netdata/.curlrc with cacert=${cacert}" - echo >/opt/netdata/.curlrc "cacert=${cacert}" - else - run_failed "Failed to find /etc/ssl/certs/ca-certificates.crt" - fi - fi -fi - - -# ----------------------------------------------------------------------------- -progress "Install logrotate configuration for netdata" - -install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata." - - -# ----------------------------------------------------------------------------- -progress "Install netdata at system init" - -install_netdata_service || run_failed "Cannot install netdata init service." - - -# ----------------------------------------------------------------------------- -progress "creating quick links" - -dir_should_be_link() { - local p="${1}" t="${2}" d="${3}" old - - old="${PWD}" - cd "${p}" || return 0 - - if [ -e "${d}" ] - then - if [ -h "${d}" ] - then - run rm "${d}" - else - run mv -f "${d}" "${d}.old.$$" - fi - fi - - run ln -s "${t}" "${d}" - cd "${old}" -} - -dir_should_be_link . bin sbin -dir_should_be_link usr ../bin bin -dir_should_be_link usr ../bin sbin -dir_should_be_link usr . local - -dir_should_be_link . etc/netdata netdata-configs -dir_should_be_link . usr/share/netdata/web netdata-web-files -dir_should_be_link . usr/libexec/netdata netdata-plugins -dir_should_be_link . var/lib/netdata netdata-dbs -dir_should_be_link . var/cache/netdata netdata-metrics -dir_should_be_link . var/log/netdata netdata-logs - -dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig - -if [ ${deleted_stock_configs} -gt 0 ] -then - dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" -fi - - -# ----------------------------------------------------------------------------- - -progress "create user config directories" - -for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" -do - if [ ! 
-d "etc/netdata/${x}" ] - then - run mkdir -p "etc/netdata/${x}" || exit 1 - fi -done - - -# ----------------------------------------------------------------------------- -progress "fix permissions" - -run chmod g+rx,o+rx /opt -run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata - - -# ----------------------------------------------------------------------------- - -progress "fix plugin permissions" - -for x in apps.plugin freeipmi.plugin cgroup-network -do - f="usr/libexec/netdata/plugins.d/${x}" - - if [ -f "${f}" ] - then - run chown root:${NETDATA_GROUP} "${f}" - run chmod 4750 "${f}" - fi -done - -# fix the fping binary -if [ -f bin/fping ] -then - run chown root:${NETDATA_GROUP} bin/fping - run chmod 4750 bin/fping -fi - - -# ----------------------------------------------------------------------------- - -if [ ${STARTIT} -eq 1 ] -then - progress "starting netdata" - - restart_netdata "/opt/netdata/bin/netdata" - if [ $? -eq 0 ] - then - download_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" - netdata_banner "is installed and running now!" - else - generate_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" - netdata_banner "is installed now!" - fi -else - generate_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" - netdata_banner "is installed now!" -fi diff --git a/makeself/jobs/10-prepare-destination.install.sh b/makeself/jobs/10-prepare-destination.install.sh deleted file mode 100755 index 019732636..000000000 --- a/makeself/jobs/10-prepare-destination.install.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -. $(dirname "${0}")/../functions.sh "${@}" || exit 1 - -[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old" -[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old" - -run mkdir -p "${NETDATA_INSTALL_PATH}/bin" -run mkdir -p "${NETDATA_INSTALL_PATH}/usr" -run cd "${NETDATA_INSTALL_PATH}" -run ln -s bin sbin -run cd "${NETDATA_INSTALL_PATH}/usr" -run ln -s ../bin bin -run ln -s ../sbin sbin -run ln -s . local - diff --git a/makeself/jobs/50-bash-4.4.18.install.sh b/makeself/jobs/50-bash-4.4.18.install.sh deleted file mode 100755 index 000765825..000000000 --- a/makeself/jobs/50-bash-4.4.18.install.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -. 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 - -fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz" - -run ./configure \ - --prefix=${NETDATA_INSTALL_PATH} \ - --without-bash-malloc \ - --enable-static-link \ - --enable-net-redirections \ - --enable-array-variables \ - --disable-profiling \ - --disable-nls \ -# --disable-rpath \ -# --enable-alias \ -# --enable-arith-for-command \ -# --enable-array-variables \ -# --enable-brace-expansion \ -# --enable-casemod-attributes \ -# --enable-casemod-expansions \ -# --enable-command-timing \ -# --enable-cond-command \ -# --enable-cond-regexp \ -# --enable-directory-stack \ -# --enable-dparen-arithmetic \ -# --enable-function-import \ -# --enable-glob-asciiranges-default \ -# --enable-help-builtin \ -# --enable-job-control \ -# --enable-net-redirections \ -# --enable-process-substitution \ -# --enable-progcomp \ -# --enable-prompt-string-decoding \ -# --enable-readline \ -# --enable-select \ - - -run make clean -run make -j${SYSTEM_CPUS} - -cat >examples/loadables/Makefile <doc/Makefile <"${NETDATA_INSTALL_PATH}/bin/netdata" <"${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" - -run "${NETDATA_MAKESELF_PATH}/makeself.sh" \ - --gzip \ - --complevel 9 \ - --notemp \ - --needroot \ - --target "${NETDATA_INSTALL_PATH}" \ - --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \ - --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \ - --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \ - --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \ - "${NETDATA_INSTALL_PATH}" \ - "${NETDATA_INSTALL_PATH}.gz.run" \ - "netdata, the real-time performance and health monitoring system" \ - ./system/post-installer.sh \ - ${NULL} - -run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" - -# ----------------------------------------------------------------------------- -# copy it to the netdata build dir - -FILE="netdata-${FILE_VERSION}.gz.run" - -run cp "${NETDATA_INSTALL_PATH}.gz.run" "${FILE}" -echo >&2 "Self-extracting installer copied to '${FILE}'" - -[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run -run ln -s "${FILE}" netdata-latest.gz.run -echo >&2 "Self-extracting installer linked to 'netdata-latest.gz.run'" diff --git a/makeself/makeself-header.sh b/makeself/makeself-header.sh deleted file mode 100755 index 19c1c3f99..000000000 --- a/makeself/makeself-header.sh +++ /dev/null @@ -1,554 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -cat << EOF > "$archname" -#!/bin/sh -# This script was generated using Makeself $MS_VERSION - -ORIG_UMASK=\`umask\` -if test "$KEEP_UMASK" = n; then - umask 077 -fi - -CRCsum="$CRCsum" -MD5="$MD5sum" -TMPROOT=\${TMPDIR:=/tmp} -USER_PWD="\$PWD"; export USER_PWD - -label="$LABEL" -script="$SCRIPT" -scriptargs="$SCRIPTARGS" -licensetxt="$LICENSE" -helpheader='$HELPHEADER' -targetdir="$archdirname" -filesizes="$filesizes" -keep="$KEEP" -nooverwrite="$NOOVERWRITE" -quiet="n" -accept="n" -nodiskspace="n" -export_conf="$EXPORT_CONF" - -print_cmd_arg="" -if type printf > /dev/null; then - print_cmd="printf" -elif test -x /usr/ucb/echo; then - print_cmd="/usr/ucb/echo" -else - print_cmd="echo" -fi - -if test -d /usr/xpg4/bin; then - PATH=/usr/xpg4/bin:\$PATH - export PATH -fi - -unset CDPATH - -MS_Printf() -{ - \$print_cmd \$print_cmd_arg "\$1" -} - -MS_PrintLicense() -{ - if test x"\$licensetxt" != x; then - echo "\$licensetxt" - if test x"\$accept" != xy; then - while true - do - MS_Printf "Please type y to accept, n otherwise: " - read yn - if test x"\$yn" = xn; then - keep=n - eval \$finish; 
exit 1 - break; - elif test x"\$yn" = xy; then - break; - fi - done - fi - fi -} - -MS_diskspace() -{ - ( - df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }' - ) -} - -MS_dd() -{ - blocks=\`expr \$3 / 1024\` - bytes=\`expr \$3 % 1024\` - dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\ - { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\ - test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null -} - -MS_dd_Progress() -{ - if test x"\$noprogress" = xy; then - MS_dd \$@ - return \$? - fi - file="\$1" - offset=\$2 - length=\$3 - pos=0 - bsize=4194304 - while test \$bsize -gt \$length; do - bsize=\`expr \$bsize / 4\` - done - blocks=\`expr \$length / \$bsize\` - bytes=\`expr \$length % \$bsize\` - ( - dd ibs=\$offset skip=1 2>/dev/null - pos=\`expr \$pos \+ \$bsize\` - MS_Printf " 0%% " 1>&2 - if test \$blocks -gt 0; then - while test \$pos -le \$length; do - dd bs=\$bsize count=1 2>/dev/null - pcent=\`expr \$length / 100\` - pcent=\`expr \$pos / \$pcent\` - if test \$pcent -lt 100; then - MS_Printf "\b\b\b\b\b\b\b" 1>&2 - if test \$pcent -lt 10; then - MS_Printf " \$pcent%% " 1>&2 - else - MS_Printf " \$pcent%% " 1>&2 - fi - fi - pos=\`expr \$pos \+ \$bsize\` - done - fi - if test \$bytes -gt 0; then - dd bs=\$bytes count=1 2>/dev/null - fi - MS_Printf "\b\b\b\b\b\b\b" 1>&2 - MS_Printf " 100%% " 1>&2 - ) < "\$file" -} - -MS_Help() -{ - cat << EOH >&2 -\${helpheader}Makeself version $MS_VERSION - 1) Getting help or info about \$0 : - \$0 --help Print this message - \$0 --info Print embedded info : title, default target directory, embedded script ... - \$0 --lsm Print embedded lsm entry (or no LSM) - \$0 --list Print the list of files in the archive - \$0 --check Checks integrity of the archive - - 2) Running \$0 : - \$0 [options] [--] [additional arguments to embedded script] - with following options (in that order) - --confirm Ask before running embedded script - --quiet Do not print anything except error messages - --accept Accept the license - --noexec Do not run embedded script - --keep Do not erase target directory after running - the embedded script - --noprogress Do not show the progress during the decompression - --nox11 Do not spawn an xterm - --nochown Do not give the extracted files to the current user - --nodiskspace Do not check for available disk space - --target dir Extract directly to a target directory - directory path can be either absolute or relative - --tar arg1 [arg2 ...] Access the contents of the archive through the tar command - -- Following arguments will be passed to the embedded script -EOH -} - -MS_Check() -{ - OLD_PATH="\$PATH" - PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} - MD5_ARG="" - MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\` - test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\` - test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\` - PATH="\$OLD_PATH" - - if test x"\$quiet" = xn; then - MS_Printf "Verifying archive integrity..." 
- fi - offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\` - verb=\$2 - i=1 - for s in \$filesizes - do - crc=\`echo \$CRCsum | cut -d" " -f\$i\` - if test -x "\$MD5_PATH"; then - if test x"\`basename \$MD5_PATH\`" = xdigest; then - MD5_ARG="-a md5" - fi - md5=\`echo \$MD5 | cut -d" " -f\$i\` - if test x"\$md5" = x00000000000000000000000000000000; then - test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2 - else - md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`; - if test x"\$md5sum" != x"\$md5"; then - echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2 - exit 2 - else - test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2 - fi - crc="0000000000"; verb=n - fi - fi - if test x"\$crc" = x0000000000; then - test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2 - else - sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\` - if test x"\$sum1" = x"\$crc"; then - test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2 - else - echo "Error in checksums: \$sum1 is different from \$crc" >&2 - exit 2; - fi - fi - i=\`expr \$i + 1\` - offset=\`expr \$offset + \$s\` - done - if test x"\$quiet" = xn; then - echo " All good." - fi -} - -UnTAR() -{ - if test x"\$quiet" = xn; then - tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; } - else - tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. > /dev/tty; kill -15 \$$; } - fi -} - -finish=true -xterm_loop= -noprogress=$NOPROGRESS -nox11=$NOX11 -copy=$COPY -ownership=y -verbose=n - -initargs="\$@" - -while true -do - case "\$1" in - -h | --help) - MS_Help - exit 0 - ;; - -q | --quiet) - quiet=y - noprogress=y - shift - ;; - --accept) - accept=y - shift - ;; - --info) - echo Identification: "\$label" - echo Target directory: "\$targetdir" - echo Uncompressed size: $USIZE KB - echo Compression: $COMPRESS - echo Date of packaging: $DATE - echo Built with Makeself version $MS_VERSION on $OSTYPE - echo Build command was: "$MS_COMMAND" - if test x"\$script" != x; then - echo Script run after extraction: - echo " " \$script \$scriptargs - fi - if test x"$copy" = xcopy; then - echo "Archive will copy itself to a temporary location" - fi - if test x"$NEED_ROOT" = xy; then - echo "Root permissions required for extraction" - fi - if test x"$KEEP" = xy; then - echo "directory \$targetdir is permanent" - else - echo "\$targetdir will be removed after extraction" - fi - exit 0 - ;; - --dumpconf) - echo LABEL=\"\$label\" - echo SCRIPT=\"\$script\" - echo SCRIPTARGS=\"\$scriptargs\" - echo archdirname=\"$archdirname\" - echo KEEP=$KEEP - echo NOOVERWRITE=$NOOVERWRITE - echo COMPRESS=$COMPRESS - echo filesizes=\"\$filesizes\" - echo CRCsum=\"\$CRCsum\" - echo MD5sum=\"\$MD5\" - echo OLDUSIZE=$USIZE - echo OLDSKIP=`expr $SKIP + 1` - exit 0 - ;; - --lsm) -cat << EOLSM -EOF -eval "$LSM_CMD" -cat << EOF >> "$archname" -EOLSM - exit 0 - ;; - --list) - echo Target directory: \$targetdir - offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` - for s in \$filesizes - do - MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t - offset=\`expr \$offset + \$s\` - done - exit 0 - ;; - --tar) - offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` - arg1="\$2" - if ! 
shift 2; then MS_Help; exit 1; fi - for s in \$filesizes - do - MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@" - offset=\`expr \$offset + \$s\` - done - exit 0 - ;; - --check) - MS_Check "\$0" y - exit 0 - ;; - --confirm) - verbose=y - shift - ;; - --noexec) - script="" - shift - ;; - --keep) - keep=y - shift - ;; - --target) - keep=y - targetdir=\${2:-.} - if ! shift 2; then MS_Help; exit 1; fi - ;; - --noprogress) - noprogress=y - shift - ;; - --nox11) - nox11=y - shift - ;; - --nochown) - ownership=n - shift - ;; - --nodiskspace) - nodiskspace=y - shift - ;; - --xwin) - if test "$NOWAIT" = n; then - finish="echo Press Return to close this window...; read junk" - fi - xterm_loop=1 - shift - ;; - --phase2) - copy=phase2 - shift - ;; - --) - shift - break ;; - -*) - echo Unrecognized flag : "\$1" >&2 - MS_Help - exit 1 - ;; - *) - break ;; - esac -done - -if test x"\$quiet" = xy -a x"\$verbose" = xy; then - echo Cannot be verbose and quiet at the same time. >&2 - exit 1 -fi - -if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then - echo "Administrative privileges required for this archive (use su or sudo)" >&2 - exit 1 -fi - -if test x"\$copy" \!= xphase2; then - MS_PrintLicense -fi - -case "\$copy" in -copy) - tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$ - mkdir "\$tmpdir" || { - echo "Could not create temporary directory \$tmpdir" >&2 - exit 1 - } - SCRIPT_COPY="\$tmpdir/makeself" - echo "Copying to a temporary location..." >&2 - cp "\$0" "\$SCRIPT_COPY" - chmod +x "\$SCRIPT_COPY" - cd "\$TMPROOT" - exec "\$SCRIPT_COPY" --phase2 -- \$initargs - ;; -phase2) - finish="\$finish ; rm -rf \`dirname \$0\`" - ;; -esac - -if test x"\$nox11" = xn; then - if tty -s; then # Do we have a terminal? - : - else - if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X? - if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable - GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology" - for a in \$GUESS_XTERMS; do - if type \$a >/dev/null 2>&1; then - XTERM=\$a - break - fi - done - chmod a+x \$0 || echo Please add execution rights on \$0 - if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal! - exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs" - else - exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs" - fi - fi - fi - fi -fi - -if test x"\$targetdir" = x.; then - tmpdir="." -else - if test x"\$keep" = xy; then - if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then - echo "Target directory \$targetdir already exists, aborting." >&2 - exit 1 - fi - if test x"\$quiet" = xn; then - echo "Creating directory \$targetdir" >&2 - fi - tmpdir="\$targetdir" - dashp="-p" - else - tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM" - dashp="" - fi - mkdir \$dashp \$tmpdir || { - echo 'Cannot create target directory' \$tmpdir >&2 - echo 'You should try option --target dir' >&2 - eval \$finish - exit 1 - } -fi - -location="\`pwd\`" -if test x"\$SETUP_NOCHECK" != x1; then - MS_Check "\$0" -fi -offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` - -if test x"\$verbose" = xy; then - MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? 
[Y/n] " - read yn - if test x"\$yn" = xn; then - eval \$finish; exit 1 - fi -fi - -if test x"\$quiet" = xn; then - MS_Printf "Uncompressing \$label" -fi -res=3 -if test x"\$keep" = xn; then - trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15 -fi - -if test x"\$nodiskspace" = xn; then - leftspace=\`MS_diskspace \$tmpdir\` - if test -n "\$leftspace"; then - if test "\$leftspace" -lt $USIZE; then - echo - echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2 - echo "Use --nodiskspace option to skip this check and proceed anyway" >&2 - if test x"\$keep" = xn; then - echo "Consider setting TMPDIR to a directory with more free space." - fi - eval \$finish; exit 1 - fi - fi -fi - -for s in \$filesizes -do - if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then - if test x"\$ownership" = xy; then - (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .) - fi - else - echo >&2 - echo "Unable to decompress \$0" >&2 - eval \$finish; exit 1 - fi - offset=\`expr \$offset + \$s\` -done -if test x"\$quiet" = xn; then - echo -fi - -cd "\$tmpdir" -res=0 -if test x"\$script" != x; then - if test x"\$export_conf" = x"y"; then - MS_BUNDLE="\$0" - MS_LABEL="\$label" - MS_SCRIPT="\$script" - MS_SCRIPTARGS="\$scriptargs" - MS_ARCHDIRNAME="\$archdirname" - MS_KEEP="\$KEEP" - MS_NOOVERWRITE="\$NOOVERWRITE" - MS_COMPRESS="\$COMPRESS" - export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS - export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS - fi - - if test x"\$verbose" = x"y"; then - MS_Printf "OK to execute: \$script \$scriptargs \$* ? [Y/n] " - read yn - if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then - eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?; - fi - else - eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$? - fi - if test "\$res" -ne 0; then - test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2 - fi -fi -if test x"\$keep" = xn; then - cd \$TMPROOT - /bin/rm -rf \$tmpdir -fi -eval \$finish; exit \$res -EOF diff --git a/makeself/makeself-help-header.txt b/makeself/makeself-help-header.txt deleted file mode 100644 index 6e9e96237..000000000 --- a/makeself/makeself-help-header.txt +++ /dev/null @@ -1,46 +0,0 @@ - - ^ - |.-. .-. .-. .-. . netdata - | '-' '-' '-' '-' real-time performance monitoring, done right! - +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> - - (C) Copyright 2017, Costa Tsaousis - All rights reserved - Released under GPL v3+ - - You are about to install netdata to this system. - netdata will be installed at: - - /opt/netdata - - The following changes will be made to your system: - - # USERS / GROUPS - User 'netdata' and group 'netdata' will be added, if not present. - - # LOGROTATE - This file will be installed if logrotate is present. - - - /etc/logrotate.d/netdata - - # SYSTEM INIT - This file will be installed if this system runs with systemd: - - - /lib/systemd/system/netdata.service - - or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: - - - /etc/init.d/netdata will be created - - - This package can also update a netdata installation that has been - created with another version of it. - - Your netdata configuration will be retained. - After installation, netdata will be (re-)started. - - netdata re-distributes a lot of open source software components. 
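A minimal sketch of a typical non-interactive invocation of the finished self-extracting archive, assuming the netdata-latest.gz.run link created by the 99-makeself.install.sh job above (--accept answers the license prompt; arguments after -- are forwarded by post-installer.sh to install-or-update.sh):

    # root is required, since the archive is built with --needroot
    sudo sh netdata-latest.gz.run --accept -- --dont-start-it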
- Check its full license at: - https://github.com/netdata/netdata/blob/master/LICENSE.md - - diff --git a/makeself/makeself-license.txt b/makeself/makeself-license.txt deleted file mode 100644 index 6e9e96237..000000000 --- a/makeself/makeself-license.txt +++ /dev/null @@ -1,46 +0,0 @@ - - ^ - |.-. .-. .-. .-. . netdata - | '-' '-' '-' '-' real-time performance monitoring, done right! - +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> - - (C) Copyright 2017, Costa Tsaousis - All rights reserved - Released under GPL v3+ - - You are about to install netdata to this system. - netdata will be installed at: - - /opt/netdata - - The following changes will be made to your system: - - # USERS / GROUPS - User 'netdata' and group 'netdata' will be added, if not present. - - # LOGROTATE - This file will be installed if logrotate is present. - - - /etc/logrotate.d/netdata - - # SYSTEM INIT - This file will be installed if this system runs with systemd: - - - /lib/systemd/system/netdata.service - - or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: - - - /etc/init.d/netdata will be created - - - This package can also update a netdata installation that has been - created with another version of it. - - Your netdata configuration will be retained. - After installation, netdata will be (re-)started. - - netdata re-distributes a lot of open source software components. - Check its full license at: - https://github.com/netdata/netdata/blob/master/LICENSE.md - - diff --git a/makeself/makeself.lsm b/makeself/makeself.lsm deleted file mode 100644 index 6bd4703db..000000000 --- a/makeself/makeself.lsm +++ /dev/null @@ -1,16 +0,0 @@ -Begin3 -Title: netdata -Version: NETDATA_VERSION -Description: netdata is a system for distributed real-time performance and health monitoring. - It provides unparalleled insights, in real-time, of everything happening on the - system it runs (including applications such as web and database servers), using - modern interactive web dashboards. netdata is fast and efficient, designed to - permanently run on all systems (physical & virtual servers, containers, IoT - devices), without disrupting their core function. -Keywords: real-time performance and health monitoring -Author: Costa Tsaousis (costa@tsaousis.gr) -Maintained-by: Costa Tsaousis (costa@tsaousis.gr) -Original-site: https://my-netdata.io/ -Platform: Unix -Copying-policy: GPL -End diff --git a/makeself/makeself.sh b/makeself/makeself.sh deleted file mode 100755 index ee89df9a4..000000000 --- a/makeself/makeself.sh +++ /dev/null @@ -1,621 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-3.0-or-later -# -# Makeself version 2.3.x -# by Stephane Peter -# -# Utility to create self-extracting tar.gz archives. -# The resulting archive is a file holding the tar.gz archive with -# a small Shell script stub that uncompresses the archive to a temporary -# directory and then executes a given script from within that directory. -# -# Makeself home page: http://makeself.io/ -# -# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain. -# -# Version history : -# - 1.0 : Initial public release -# - 1.1 : The archive can be passed parameters that will be passed on to -# the embedded script, thanks to John C. Quillan -# - 1.2 : Package distribution, bzip2 compression, more command line options, -# support for non-temporary archives. Ideas thanks to Francois Petitjean -# - 1.3 : More patches from Bjarni R.
Einarsson and Francois Petitjean: -# Support for no compression (--nocomp), script is no longer mandatory, -# automatic launch in an xterm, optional verbose output, and -target -# archive option to indicate where to extract the files. -# - 1.4 : Improved UNIX compatibility (Francois Petitjean) -# Automatic integrity checking, support of LSM files (Francois Petitjean) -# - 1.5 : Many bugfixes. Optionally disable xterm spawning. -# - 1.5.1 : More bugfixes, added archive options -list and -check. -# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big -# archives (Quake III demo) -# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm. -# More verbosity in xterms and check for embedded command's return value. -# Bugfix for Debian 2.0 systems that have a different "print" command. -# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed. -# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to -# bypass checksum verification of archives. -# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon) -# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports. -# - 2.0.1 : Added --copy -# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates. -# Added --nochown for archives -# Stopped doing redundant checksums when not necessary -# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command -# Cleaned up the code to handle error codes from compress. Simplified the extraction code. -# - 2.1.2 : Some bug fixes. Use head -n to avoid problems. -# - 2.1.3 : Bug fixes with command line when spawning terminals. -# Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive. -# Added --noexec to prevent execution of embedded scripts. -# Added --nomd5 and --nocrc to avoid creating checksums in archives. -# Added command used to create the archive in --info output. -# Run the embedded script through eval. -# - 2.1.4 : Fixed --info output. -# Generate random directory name when extracting files to . to avoid problems. (Jason Trent) -# Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent) -# Avoid some race conditions (Ludwig Nussel) -# Unset the $CDPATH variable to avoid problems if it is set. (Debian) -# Better handling of dot files in the archive directory. -# - 2.1.5 : Made the md5sum detection consistent with the header code. -# Check for the presence of the archive directory -# Added --encrypt for symmetric encryption through gpg (Eric Windisch) -# Added support for the digest command on Solaris 10 for MD5 checksums -# Check for available disk space before extracting to the target directory (Andreas Schweitzer) -# Allow extraction to run asynchronously (patch by Peter Hatch) -# Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo) -# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spinning cursor (Guy Baconniere) -# Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere) -# Added --target dir to allow extracting directly to a target directory (Guy Baconniere) -# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on Github for the details. -# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility.
(Marc Pawlowsky) -# -# (C) 1998-2017 by Stephane Peter -# -# This software is released under the terms of the GNU GPL version 2 and above -# Please read the license at http://www.gnu.org/copyleft/gpl.html -# - -MS_VERSION=2.3.1 -MS_COMMAND="$0" -unset CDPATH - -for f in "${1+"$@"}"; do - MS_COMMAND="$MS_COMMAND \\\\ - \\\"$f\\\"" -done - -# For Solaris systems -if test -d /usr/xpg4/bin; then - PATH=/usr/xpg4/bin:$PATH - export PATH -fi - -# Procedures - -MS_Usage() -{ - echo "Usage: $0 [params] archive_dir file_name label startup_script [args]" - echo "params can be one or more of the following :" - echo " --version | -v : Print out Makeself version number and exit" - echo " --help | -h : Print out this help message" - echo " --tar-quietly : Suppress verbose output from the tar command" - echo " --quiet | -q : Do not print any messages other than errors." - echo " --gzip : Compress using gzip (default if detected)" - echo " --pigz : Compress with pigz" - echo " --bzip2 : Compress using bzip2 instead of gzip" - echo " --pbzip2 : Compress using pbzip2 instead of gzip" - echo " --xz : Compress using xz instead of gzip" - echo " --lzo : Compress using lzop instead of gzip" - echo " --lz4 : Compress using lz4 instead of gzip" - echo " --compress : Compress using the UNIX 'compress' command" - echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)" - echo " --base64 : Instead of compressing, encode the data using base64" - echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG" - echo " --gpg-asymmetric-encrypt-sign" - echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG" - echo " --gpg-extra opt : Append more options to the gpg command line" - echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL" - echo " --nocomp : Do not compress the data" - echo " --notemp : The archive will create archive_dir in the" - echo " current directory and uncompress in ./archive_dir" - echo " --needroot : Check that the root user is extracting the archive before proceeding" - echo " --copy : Upon extraction, the archive will first copy itself to" - echo " a temporary directory" - echo " --append : Append more files to an existing Makeself archive" - echo " The label and startup scripts will then be ignored" - echo " --target dir : Extract directly to a target directory" - echo " directory path can be either absolute or relative" - echo " --nooverwrite : Do not extract the archive if the specified target directory exists" - echo " --current : Files will be extracted to the current directory" - echo " Both --current and --target imply --notemp" - echo " --tar-extra opt : Append more options to the tar command line" - echo " --untar-extra opt : Append more options to tar during the extraction of the tar archive" - echo " --nomd5 : Don't calculate an MD5 for archive" - echo " --nocrc : Don't calculate a CRC for archive" - echo " --header file : Specify location of the header script" - echo " --follow : Follow the symlinks in the archive" - echo " --noprogress : Do not show the progress during the decompression" - echo " --nox11 : Disable automatic spawn of an xterm" - echo " --nowait : Do not wait for user input after executing embedded" - echo " program from an xterm" - echo " --lsm file : LSM file describing the package" - echo " --license file : Append a license file" - echo " --help-header file : Add a header to the archive's --help output" - echo " --packaging-date date" - echo " : Use provided
string as the packaging date" - echo " instead of the current date." - echo - echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive." - echo " --export-conf : Export configuration variables to startup_script" - echo - echo "Do not forget to give a fully qualified startup script name" - echo "(i.e. with a ./ prefix if inside the archive)." - exit 1 -} - -# Default settings -if type gzip 2>&1 > /dev/null; then - COMPRESS=gzip -else - COMPRESS=Unix -fi -COMPRESS_LEVEL=9 -KEEP=n -CURRENT=n -NOX11=n -NOWAIT=n -APPEND=n -TAR_QUIETLY=n -KEEP_UMASK=n -QUIET=n -NOPROGRESS=n -COPY=none -NEED_ROOT=n -TAR_ARGS=cvf -TAR_EXTRA="" -GPG_EXTRA="" -DU_ARGS=-ks -HEADER=`dirname "$0"`/makeself-header.sh -TARGETDIR="" -NOOVERWRITE=n -DATE=`LC_ALL=C date` -EXPORT_CONF=n - -# LSM file stuff -LSM_CMD="echo No LSM. >> \"\$archname\"" - -while true -do - case "$1" in - --version | -v) - echo Makeself version $MS_VERSION - exit 0 - ;; - --pbzip2) - COMPRESS=pbzip2 - shift - ;; - --bzip2) - COMPRESS=bzip2 - shift - ;; - --gzip) - COMPRESS=gzip - shift - ;; - --pigz) - COMPRESS=pigz - shift - ;; - --xz) - COMPRESS=xz - shift - ;; - --lzo) - COMPRESS=lzo - shift - ;; - --lz4) - COMPRESS=lz4 - shift - ;; - --compress) - COMPRESS=Unix - shift - ;; - --base64) - COMPRESS=base64 - shift - ;; - --gpg-encrypt) - COMPRESS=gpg - shift - ;; - --gpg-asymmetric-encrypt-sign) - COMPRESS=gpg-asymmetric - shift - ;; - --gpg-extra) - GPG_EXTRA="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --ssl-encrypt) - COMPRESS=openssl - shift - ;; - --nocomp) - COMPRESS=none - shift - ;; - --complevel) - COMPRESS_LEVEL="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --notemp) - KEEP=y - shift - ;; - --copy) - COPY=copy - shift - ;; - --current) - CURRENT=y - KEEP=y - shift - ;; - --tar-extra) - TAR_EXTRA="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --untar-extra) - UNTAR_EXTRA="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --target) - TARGETDIR="$2" - KEEP=y - if ! shift 2; then MS_Help; exit 1; fi - ;; - --nooverwrite) - NOOVERWRITE=y - shift - ;; - --needroot) - NEED_ROOT=y - shift - ;; - --header) - HEADER="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --license) - LICENSE=`cat $2` - if ! shift 2; then MS_Help; exit 1; fi - ;; - --follow) - TAR_ARGS=cvhf - DU_ARGS=-ksL - shift - ;; - --noprogress) - NOPROGRESS=y - shift - ;; - --nox11) - NOX11=y - shift - ;; - --nowait) - NOWAIT=y - shift - ;; - --nomd5) - NOMD5=y - shift - ;; - --nocrc) - NOCRC=y - shift - ;; - --append) - APPEND=y - shift - ;; - --lsm) - LSM_CMD="cat \"$2\" >> \"\$archname\"" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --packaging-date) - DATE="$2" - if ! shift 2; then MS_Help; exit 1; fi - ;; - --help-header) - HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2` - if ! shift 2; then MS_Help; exit 1; fi - [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER -" - ;; - --tar-quietly) - TAR_QUIETLY=y - shift - ;; - --keep-umask) - KEEP_UMASK=y - shift - ;; - --export-conf) - EXPORT_CONF=y - shift - ;; - -q | --quiet) - QUIET=y - shift - ;; - -h | --help) - MS_Usage - ;; - -*) - echo Unrecognized flag : "$1" - MS_Usage - ;; - *) - break - ;; - esac -done - -if test $# -lt 1; then - MS_Usage -else - if test -d "$1"; then - archdir="$1" - else - echo "Directory $1 does not exist." 
>&2 - exit 1 - fi -fi -archname="$2" - -if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then - if test "$TAR_ARGS" = "cvf"; then - TAR_ARGS="cf" - elif test "$TAR_ARGS" = "cvhf";then - TAR_ARGS="chf" - fi -fi - -if test "$APPEND" = y; then - if test $# -lt 2; then - MS_Usage - fi - - # Gather the info from the original archive - OLDENV=`sh "$archname" --dumpconf` - if test $? -ne 0; then - echo "Unable to update archive: $archname" >&2 - exit 1 - else - eval "$OLDENV" - fi -else - if test "$KEEP" = n -a $# = 3; then - echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2 - echo >&2 - MS_Usage - fi - # We don't want to create an absolute directory unless a target directory is defined - if test "$CURRENT" = y; then - archdirname="." - elif test x$TARGETDIR != x; then - archdirname="$TARGETDIR" - else - archdirname=`basename "$1"` - fi - - if test $# -lt 3; then - MS_Usage - fi - - LABEL="$3" - SCRIPT="$4" - test "x$SCRIPT" = x || shift 1 - shift 3 - SCRIPTARGS="$*" -fi - -if test "$KEEP" = n -a "$CURRENT" = y; then - echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2 - exit 1 -fi - -case $COMPRESS in -gzip) - GZIP_CMD="gzip -c$COMPRESS_LEVEL" - GUNZIP_CMD="gzip -cd" - ;; -pigz) - GZIP_CMD="pigz -$COMPRESS_LEVEL" - GUNZIP_CMD="gzip -cd" - ;; -pbzip2) - GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL" - GUNZIP_CMD="bzip2 -d" - ;; -bzip2) - GZIP_CMD="bzip2 -$COMPRESS_LEVEL" - GUNZIP_CMD="bzip2 -d" - ;; -xz) - GZIP_CMD="xz -c$COMPRESS_LEVEL" - GUNZIP_CMD="xz -d" - ;; -lzo) - GZIP_CMD="lzop -c$COMPRESS_LEVEL" - GUNZIP_CMD="lzop -d" - ;; -lz4) - GZIP_CMD="lz4 -c$COMPRESS_LEVEL" - GUNZIP_CMD="lz4 -d" - ;; -base64) - GZIP_CMD="base64" - GUNZIP_CMD="base64 -d -i" - ;; -gpg) - GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL" - GUNZIP_CMD="gpg -d" - ;; -gpg-asymmetric) - GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es" - GUNZIP_CMD="gpg --yes -d" - ;; -openssl) - GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256" - GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256" - ;; -Unix) - GZIP_CMD="compress -cf" - GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd" - ;; -none) - GZIP_CMD="cat" - GUNZIP_CMD="cat" - ;; -esac - -tmpfile="${TMPDIR:=/tmp}/mkself$$" - -if test -f "$HEADER"; then - oldarchname="$archname" - archname="$tmpfile" - # Generate a fake header to count its lines - SKIP=0 - . "$HEADER" - SKIP=`cat "$tmpfile" |wc -l` - # Get rid of any spaces - SKIP=`expr $SKIP` - rm -f "$tmpfile" - if test "$QUIET" = "n";then - echo Header is $SKIP lines long >&2 - fi - - archname="$oldarchname" -else - echo "Unable to open header file: $HEADER" >&2 - exit 1 -fi - -if test "$QUIET" = "n";then - echo -fi - -if test "$APPEND" = n; then - if test -f "$archname"; then - echo "WARNING: Overwriting existing file: $archname" >&2 - fi -fi - -USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'` - -if test "." = "$archdirname"; then - if test "$KEEP" = n; then - archdirname="makeself-$$-`date +%Y%m%d%H%M%S`" - fi -fi - -test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; } -if test "$QUIET" = "n";then - echo About to compress $USIZE KB of data... - echo Adding files to archive named \"$archname\"... -fi -exec 3<> "$tmpfile" -( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . 
| eval "$GZIP_CMD" >&3 ) ) || \ - { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; } -exec 3>&- # try to close the archive - -fsize=`cat "$tmpfile" | wc -c | tr -d " "` - -# Compute the checksums - -md5sum=00000000000000000000000000000000 -crcsum=0000000000 - -if test "$NOCRC" = y; then - if test "$QUIET" = "n";then - echo "skipping crc at user request" - fi -else - crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1` - if test "$QUIET" = "n";then - echo "CRC: $crcsum" - fi -fi - -if test "$NOMD5" = y; then - if test "$QUIET" = "n";then - echo "skipping md5sum at user request" - fi -else - # Try to locate a MD5 binary - OLD_PATH=$PATH - PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} - MD5_ARG="" - MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum` - test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5` - test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest` - PATH=$OLD_PATH - if test -x "$MD5_PATH"; then - if test `basename ${MD5_PATH}`x = digestx; then - MD5_ARG="-a md5" - fi - md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`; - if test "$QUIET" = "n";then - echo "MD5: $md5sum" - fi - else - if test "$QUIET" = "n";then - echo "MD5: none, MD5 command not found" - fi - fi -fi - -if test "$APPEND" = y; then - mv "$archname" "$archname".bak || exit - - # Prepare entry for new archive - filesizes="$filesizes $fsize" - CRCsum="$CRCsum $crcsum" - MD5sum="$MD5sum $md5sum" - USIZE=`expr $USIZE + $OLDUSIZE` - # Generate the header - . "$HEADER" - # Append the original data - tail -n +$OLDSKIP "$archname".bak >> "$archname" - # Append the new data - cat "$tmpfile" >> "$archname" - - chmod +x "$archname" - rm -f "$archname".bak - if test "$QUIET" = "n";then - echo Self-extractable archive \"$archname\" successfully updated. - fi -else - filesizes="$fsize" - CRCsum="$crcsum" - MD5sum="$md5sum" - - # Generate the header - . "$HEADER" - - # Append the compressed tar data after the stub - if test "$QUIET" = "n";then - echo - fi - cat "$tmpfile" >> "$archname" - chmod +x "$archname" - if test "$QUIET" = "n";then - echo Self-extractable archive \"$archname\" successfully created. - fi -fi -rm -f "$tmpfile" diff --git a/makeself/post-installer.sh b/makeself/post-installer.sh deleted file mode 100755 index 38cc41ef7..000000000 --- a/makeself/post-installer.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-3.0-or-later - -# This script is started using the shell of the system -# and executes our 'install-or-update.sh' script -# using the netdata supplied, statically linked BASH -# -# so, at 'install-or-update.sh' we are always sure -# we run under BASH v4. 
- -./bin/bash system/install-or-update.sh "${@}" diff --git a/makeself/run-all-jobs.sh b/makeself/run-all-jobs.sh deleted file mode 100755 index 7a35fe648..000000000 --- a/makeself/run-all-jobs.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -LC_ALL=C -umask 002 - -# be nice -renice 19 $$ >/dev/null 2>/dev/null - -# ----------------------------------------------------------------------------- -# prepare the environment for the jobs - -# installation directory -export NETDATA_INSTALL_PATH="${1-/opt/netdata}" - -# our source directory -export NETDATA_MAKESELF_PATH="$(dirname "${0}")" -if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] - then - export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" -fi - -# netdata source directory -export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/.." - -# make sure ${NULL} is empty -export NULL= - -# ----------------------------------------------------------------------------- - -cd "${NETDATA_MAKESELF_PATH}" || exit 1 - -. ./functions.sh "${@}" || exit 1 - -for x in jobs/*.install.sh -do - progress "running ${x}" - "${x}" "${NETDATA_INSTALL_PATH}" -done - -echo >&2 "All jobs for static packaging done successfully." -exit 0 diff --git a/missing b/missing deleted file mode 100755 index db98974ff..000000000 --- a/missing +++ /dev/null @@ -1,215 +0,0 @@ -#! /bin/sh -# Common wrapper for a few potentially missing GNU programs. - -scriptversion=2013-10-28.13; # UTC - -# Copyright (C) 1996-2013 Free Software Foundation, Inc. -# Originally written by Fran,cois Pinard , 1996. - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -if test $# -eq 0; then - echo 1>&2 "Try '$0 --help' for more information" - exit 1 -fi - -case $1 in - - --is-lightweight) - # Used by our autoconf macros to check whether the available missing - # script is modern enough. - exit 0 - ;; - - --run) - # Back-compat with the calling convention used by older automake. - shift - ;; - - -h|--h|--he|--hel|--help) - echo "\ -$0 [OPTION]... PROGRAM [ARGUMENT]... - -Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due -to PROGRAM being missing or too old. - -Options: - -h, --help display this help and exit - -v, --version output version information and exit - -Supported PROGRAM values: - aclocal autoconf autoheader autom4te automake makeinfo - bison yacc flex lex help2man - -Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and -'g' are ignored when checking the name. - -Send bug reports to ." - exit $? - ;; - - -v|--v|--ve|--ver|--vers|--versi|--versio|--version) - echo "missing $scriptversion (GNU Automake)" - exit $? 
- ;; - - -*) - echo 1>&2 "$0: unknown '$1' option" - echo 1>&2 "Try '$0 --help' for more information" - exit 1 - ;; - -esac - -# Run the given program, remember its exit status. -"$@"; st=$? - -# If it succeeded, we are done. -test $st -eq 0 && exit 0 - -# Also exit now if it failed (or wasn't found), and '--version' was -# passed; such an option is passed most likely to detect whether the -# program is present and works. -case $2 in --version|--help) exit $st;; esac - -# Exit code 63 means version mismatch. This often happens when the user -# tries to use an ancient version of a tool on a file that requires a -# minimum version. -if test $st -eq 63; then - msg="probably too old" -elif test $st -eq 127; then - # Program was missing. - msg="missing on your system" -else - # Program was found and executed, but failed. Give up. - exit $st -fi - -perl_URL=http://www.perl.org/ -flex_URL=http://flex.sourceforge.net/ -gnu_software_URL=http://www.gnu.org/software - -program_details () -{ - case $1 in - aclocal|automake) - echo "The '$1' program is part of the GNU Automake package:" - echo "<$gnu_software_URL/automake>" - echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" - echo "<$gnu_software_URL/autoconf>" - echo "<$gnu_software_URL/m4/>" - echo "<$perl_URL>" - ;; - autoconf|autom4te|autoheader) - echo "The '$1' program is part of the GNU Autoconf package:" - echo "<$gnu_software_URL/autoconf/>" - echo "It also requires GNU m4 and Perl in order to run:" - echo "<$gnu_software_URL/m4/>" - echo "<$perl_URL>" - ;; - esac -} - -give_advice () -{ - # Normalize program name to check for. - normalized_program=`echo "$1" | sed ' - s/^gnu-//; t - s/^gnu//; t - s/^g//; t'` - - printf '%s\n' "'$1' is $msg." - - configure_deps="'configure.ac' or m4 files included by 'configure.ac'" - case $normalized_program in - autoconf*) - echo "You should only need it if you modified 'configure.ac'," - echo "or m4 files included by it." - program_details 'autoconf' - ;; - autoheader*) - echo "You should only need it if you modified 'acconfig.h' or" - echo "$configure_deps." - program_details 'autoheader' - ;; - automake*) - echo "You should only need it if you modified 'Makefile.am' or" - echo "$configure_deps." - program_details 'automake' - ;; - aclocal*) - echo "You should only need it if you modified 'acinclude.m4' or" - echo "$configure_deps." - program_details 'aclocal' - ;; - autom4te*) - echo "You might have modified some maintainer files that require" - echo "the 'autom4te' program to be rebuilt." - program_details 'autom4te' - ;; - bison*|yacc*) - echo "You should only need it if you modified a '.y' file." - echo "You may want to install the GNU Bison package:" - echo "<$gnu_software_URL/bison/>" - ;; - lex*|flex*) - echo "You should only need it if you modified a '.l' file." - echo "You may want to install the Fast Lexical Analyzer package:" - echo "<$flex_URL>" - ;; - help2man*) - echo "You should only need it if you modified a dependency" \ - "of a man page." - echo "You may want to install the GNU Help2man package:" - echo "<$gnu_software_URL/help2man/>" - ;; - makeinfo*) - echo "You should only need it if you modified a '.texi' file, or" - echo "any other file indirectly affecting the aspect of the manual."
- echo "You might want to install the Texinfo package:" - echo "<$gnu_software_URL/texinfo/>" - echo "The spurious makeinfo call might also be the consequence of" - echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" - echo "want to install GNU make:" - echo "<$gnu_software_URL/make/>" - ;; - *) - echo "You might have modified some files without having the proper" - echo "tools for further handling them. Check the 'README' file, it" - echo "often tells you about the needed prerequisites for installing" - echo "this package. You may also peek at any GNU archive site, in" - echo "case some other package contains this missing '$1' program." - ;; - esac -} - -give_advice "$1" | sed -e '1s/^/WARNING: /' \ - -e '2,$s/^/ /' >&2 - -# Propagate the correct exit status (expected to be 127 for a program -# not found, 63 for a program that failed due to version mismatch). -exit $st - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/netdata-installer.sh b/netdata-installer.sh index dfeb56396..55ee5978e 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -4,29 +4,25 @@ export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin" uniquepath() { - local path="" - while read - do - if [[ ! "${path}" =~ (^|:)"${REPLY}"(:|$) ]] - then - [ ! -z "${path}" ] && path="${path}:" - path="${path}${REPLY}" - fi - done < <( echo "${PATH}" | tr ":" "\n" ) - - [ ! -z "${path}" ] && [[ "${PATH}" =~ /bin ]] && [[ "${PATH}" =~ /sbin ]] && export PATH="${path}" + local path="" + while read; do + if [[ ! ${path} =~ (^|:)"${REPLY}"(:|$) ]]; then + [ ! -z "${path}" ] && path="${path}:" + path="${path}${REPLY}" + fi + done < <(echo "${PATH}" | tr ":" "\n") + + [ ! -z "${path}" ] && [[ ${PATH} =~ /bin ]] && [[ ${PATH} =~ /sbin ]] && export PATH="${path}" } uniquepath netdata_source_dir="$(pwd)" installer_dir="$(dirname "${0}")" -if [ "${netdata_source_dir}" != "${installer_dir}" -a "${installer_dir}" != "." ] - then - echo >&2 "Warning: you are currently in '${netdata_source_dir}' but the installer is in '${installer_dir}'." +if [ "${netdata_source_dir}" != "${installer_dir}" -a "${installer_dir}" != "." ]; then + echo >&2 "Warning: you are currently in '${netdata_source_dir}' but the installer is in '${installer_dir}'." fi - # ----------------------------------------------------------------------------- # reload the user profile @@ -35,28 +31,36 @@ fi # make sure /etc/profile does not change our current directory cd "${netdata_source_dir}" || exit 1 - # ----------------------------------------------------------------------------- # load the required functions -if [ -f "${installer_dir}/installer/functions.sh" ] - then - source "${installer_dir}/installer/functions.sh" || exit 1 +if [ -f "${installer_dir}/packaging/installer/functions.sh" ]; then + source "${installer_dir}/packaging/installer/functions.sh" || exit 1 else - source "${netdata_source_dir}/installer/functions.sh" || exit 1 + source "${netdata_source_dir}/packaging/installer/functions.sh" || exit 1 fi +download() { + url="${1}" + dest="${2}" + if command -v wget >/dev/null 2>&1; then + run wget -O - "${url}" >"${dest}" || fatal "Cannot download ${url}" + elif command -v curl >/dev/null 2>&1; then + run curl "${url}" >"${dest}" || fatal "Cannot download ${url}" + else + fatal "I need curl or wget to proceed, but neither is available on this system." 
+ fi +} + # make sure we save all commands we run run_logfile="netdata-installer.log" - # ----------------------------------------------------------------------------- # fix PKG_CHECK_MODULES error -if [ -d /usr/share/aclocal ] -then - ACLOCAL_PATH=${ACLOCAL_PATH-/usr/share/aclocal} - export ACLOCAL_PATH +if [ -d /usr/share/aclocal ]; then + ACLOCAL_PATH=${ACLOCAL_PATH-/usr/share/aclocal} + export ACLOCAL_PATH fi export LC_ALL=C @@ -72,12 +76,15 @@ CFLAGS="${CFLAGS--O2}" # keep a log of this command printf "\n# " >>netdata-installer.log date >>netdata-installer.log -printf "CFLAGS=\"%s\" " "${CFLAGS}" >>netdata-installer.log +printf 'CFLAGS="%s" ' "${CFLAGS}" >>netdata-installer.log printf "%q " "$0" "${@}" >>netdata-installer.log printf "\n" >>netdata-installer.log REINSTALL_PWD="${PWD}" -REINSTALL_COMMAND="$(printf "%q " "$0" "${@}"; printf "\n")" +REINSTALL_COMMAND="$( + printf "%q " "$0" "${@}" + printf "\n" +)" # remove options that should not be inherited by netdata-updater.sh REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-wait/}" REINSTALL_COMMAND="${REINSTALL_COMMAND// --dont-start-it/}" @@ -95,8 +102,8 @@ LIBS_ARE_HERE=0 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}" usage() { - netdata_banner "installer command line options" - cat <<USAGE @@ -155,6 +162,14 @@ Valid are: Use this option to allow it to continue without checking pkg-config. + --disable-telemetry + + Use this flag to opt out of our anonymous telemetry program. + + --disable-go + + Use this flag to disable installation of go.d.plugin. + Netdata will by default be compiled with gcc optimization -O2 If you need to pass different CFLAGS, use something like this: @@ -173,67 +188,59 @@ For the plugins, you will at least need: USAGE } -while [ ! -z "${1}" ] -do - if [ "$1" = "--install" ] - then - NETDATA_PREFIX="${2}/netdata" - shift 2 - elif [ "$1" = "--zlib-is-really-here" -o "$1" = "--libs-are-really-here" ] - then - LIBS_ARE_HERE=1 - shift 1 - elif [ "$1" = "--dont-start-it" ] - then - DONOTSTART=1 - shift 1 - elif [ "$1" = "--dont-wait" ] - then - DONOTWAIT=1 - shift 1 - elif [ "$1" = "--auto-update" -o "$1" = "-u" ] - then - AUTOUPDATE=1 - shift 1 - elif [ "$1" = "--enable-plugin-freeipmi" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-freeipmi/} --enable-plugin-freeipmi" - shift 1 - elif [ "$1" = "--disable-plugin-freeipmi" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-freeipmi/} --disable-plugin-freeipmi" - shift 1 - elif [ "$1" = "--enable-plugin-nfacct" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-nfacct/} --enable-plugin-nfacct" - shift 1 - elif [ "$1" = "--disable-plugin-nfacct" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-nfacct/} --disable-plugin-nfacct" - shift 1 - elif [ "$1" = "--enable-lto" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto" - shift 1 - elif [ "$1" = "--disable-lto" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto" - shift 1 - elif [ "$1" = "--disable-x86-sse" ] - then - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse" - shift 1 - elif [ "$1" = "--help" -o "$1" = "-h" ] - then - usage - exit 1 - else - echo >&2 - echo >&2 "ERROR:" - echo >&2 "I cannot understand option '$1'." - usage - exit 1 - fi +while [ ! 
-z "${1}" ]; do + if [ "$1" = "--install" ]; then + NETDATA_PREFIX="${2}/netdata" + shift 2 + elif [ "$1" = "--zlib-is-really-here" -o "$1" = "--libs-are-really-here" ]; then + LIBS_ARE_HERE=1 + shift 1 + elif [ "$1" = "--dont-start-it" ]; then + DONOTSTART=1 + shift 1 + elif [ "$1" = "--dont-wait" ]; then + DONOTWAIT=1 + shift 1 + elif [ "$1" = "--auto-update" -o "$1" = "-u" ]; then + AUTOUPDATE=1 + shift 1 + elif [ "$1" = "--enable-plugin-freeipmi" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-freeipmi/} --enable-plugin-freeipmi" + shift 1 + elif [ "$1" = "--disable-plugin-freeipmi" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-freeipmi/} --disable-plugin-freeipmi" + shift 1 + elif [ "$1" = "--enable-plugin-nfacct" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-plugin-nfacct/} --enable-plugin-nfacct" + shift 1 + elif [ "$1" = "--disable-plugin-nfacct" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-nfacct/} --disable-plugin-nfacct" + shift 1 + elif [ "$1" = "--enable-lto" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto" + shift 1 + elif [ "$1" = "--disable-lto" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto" + shift 1 + elif [ "$1" = "--disable-x86-sse" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse" + shift 1 + elif [ "$1" = "--disable-telemetry" ]; then + NETDATA_DISABLE_TELEMETRY=1 + shift 1 + elif [ "$1" = "--disable-go" ]; then + NETDATA_DISABLE_GO=1 + shift 1 + elif [ "$1" = "--help" -o "$1" = "-h" ]; then + usage + exit 1 + else + echo >&2 + echo >&2 "ERROR:" + echo >&2 "I cannot understand option '$1'." + usage + exit 1 + fi done # replace multiple spaces with a single space @@ -267,86 +274,79 @@ cat < /dev/null)" ] -then - autoconf_maj_min() { - local maj min IFS=.- - - maj=$1 - min=$2 - - set -- $(autoreconf -V | sed -ne '1s/.* \([^ ]*\)$/\1/p') - eval $maj=\$1 $min=\$2 - } - autoconf_maj_min AMAJ AMIN - - if [ "$AMAJ" -gt 2 ] - then - have_autotools=Y - elif [ "$AMAJ" -eq 2 -a "$AMIN" -ge 60 ] - then - have_autotools=Y - else - echo "Found autotools $AMAJ.$AMIN" - fi +if [ "$(type autoreconf 2>/dev/null)" ]; then + autoconf_maj_min() { + local maj min IFS=.- + + maj=$1 + min=$2 + + set -- $(autoreconf -V | sed -ne '1s/.* \([^ ]*\)$/\1/p') + eval $maj=\$1 $min=\$2 + } + autoconf_maj_min AMAJ AMIN + + if [ "$AMAJ" -gt 2 ]; then + have_autotools=Y + elif [ "$AMAJ" -eq 2 -a "$AMIN" -ge 60 ]; then + have_autotools=Y + else + echo "Found autotools $AMAJ.$AMIN" + fi else - echo "No autotools found" + echo "No autotools found" fi -if [ ! "$have_autotools" ] -then - if [ -f configure ] - then - echo "Will skip autoreconf step" - else - netdata_banner "autotools v2.60 required" - cat <<"EOF" +if [ ! "$have_autotools" ]; then + if [ -f configure ]; then + echo "Will skip autoreconf step" + else + netdata_banner "autotools v2.60 required" + cat <<"EOF" ------------------------------------------------------------------------------- autotools 2.60 or later is required @@ -355,25 +355,23 @@ Sorry, you do not seem to have autotools 2.60 or later, which is required to build from the git sources of netdata. EOF - exit 1 - fi + exit 1 + fi fi -if [ ${DONOTWAIT} -eq 0 ] - then - if [ ! 
-z "${NETDATA_PREFIX}" ] - then - eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to \'\001${TPUT_CYAN}\002${NETDATA_PREFIX}\001${TPUT_YELLOW}\002\'\001${TPUT_RESET}\002 > ' -e -r REPLY" - [ $? -ne 0 ] && exit 1 - else - eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to your system\001${TPUT_RESET}\002 > ' -e -r REPLY" - [ $? -ne 0 ] && exit 1 - fi +if [ ${DONOTWAIT} -eq 0 ]; then + if [ ! -z "${NETDATA_PREFIX}" ]; then + eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to \'\001${TPUT_CYAN}\002${NETDATA_PREFIX}\001${TPUT_YELLOW}\002\'\001${TPUT_RESET}\002 > ' -e -r REPLY" + [ $? -ne 0 ] && exit 1 + else + eval "read >&2 -ep \$'\001${TPUT_BOLD}${TPUT_GREEN}\002Press ENTER to build and install netdata to your system\001${TPUT_RESET}\002 > ' -e -r REPLY" + [ $? -ne 0 ] && exit 1 + fi fi build_error() { - netdata_banner "sorry, it failed to build..." - cat <&2 "ok, assuming libs are really installed." - export ZLIB_CFLAGS=" " - export ZLIB_LIBS="-lz" - export UUID_CFLAGS=" " - export UUID_LIBS="-luuid" +if [ ${LIBS_ARE_HERE} -eq 1 ]; then + shift + echo >&2 "ok, assuming libs are really installed." + export ZLIB_CFLAGS=" " + export ZLIB_LIBS="-lz" + export UUID_CFLAGS=" " + export UUID_LIBS="-luuid" fi trap build_error EXIT - # ----------------------------------------------------------------------------- echo >&2 progress "Run autotools to configure the build environment" -if [ "$have_autotools" ] -then - run autoreconf -ivf || exit 1 +if [ "$have_autotools" ]; then + run autoreconf -ivf || exit 1 fi run ./configure \ - --prefix="${NETDATA_PREFIX}/usr" \ - --sysconfdir="${NETDATA_PREFIX}/etc" \ - --localstatedir="${NETDATA_PREFIX}/var" \ - --with-zlib \ - --with-math \ - --with-user=netdata \ - ${NETDATA_CONFIGURE_OPTIONS} \ - CFLAGS="${CFLAGS}" || exit 1 + --prefix="${NETDATA_PREFIX}/usr" \ + --sysconfdir="${NETDATA_PREFIX}/etc" \ + --localstatedir="${NETDATA_PREFIX}/var" \ + --with-zlib \ + --with-math \ + --with-user=netdata \ + ${NETDATA_CONFIGURE_OPTIONS} \ + CFLAGS="${CFLAGS}" || exit 1 # remove the build_error hook trap - EXIT @@ -445,57 +440,46 @@ trap - EXIT # ----------------------------------------------------------------------------- progress "Cleanup compilation directory" -[ -f src/netdata ] && run make clean - +run make clean # ----------------------------------------------------------------------------- progress "Compile netdata" run make -j${SYSTEM_CPUS} || exit 1 - # ----------------------------------------------------------------------------- progress "Migrate configuration files for node.d.plugin and charts.d.plugin" # migrate existing configuration files # for node.d and charts.d -if [ -d "${NETDATA_PREFIX}/etc/netdata" ] - then - # the configuration directory exists - - if [ ! -d "${NETDATA_PREFIX}/etc/netdata/charts.d" ] - then - run mkdir "${NETDATA_PREFIX}/etc/netdata/charts.d" - fi - - # move the charts.d config files - for x in apache ap cpu_apps cpufreq example exim hddtemp load_average mem_apps mysql nginx nut opensips phpfpm postfix sensors squid tomcat - do - for y in "" ".old" ".orig" - do - if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" ] - then - run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" - fi - done - done - - if [ ! 
-d "${NETDATA_PREFIX}/etc/netdata/node.d" ] - then - run mkdir "${NETDATA_PREFIX}/etc/netdata/node.d" - fi - - # move the node.d config files - for x in named sma_webbox snmp - do - for y in "" ".old" ".orig" - do - if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}" ] - then - run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}" - fi - done - done +if [ -d "${NETDATA_PREFIX}/etc/netdata" ]; then + # the configuration directory exists + + if [ ! -d "${NETDATA_PREFIX}/etc/netdata/charts.d" ]; then + run mkdir "${NETDATA_PREFIX}/etc/netdata/charts.d" + fi + + # move the charts.d config files + for x in apache ap cpu_apps cpufreq example exim hddtemp load_average mem_apps mysql nginx nut opensips phpfpm postfix sensors squid tomcat; do + for y in "" ".old" ".orig"; do + if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" ]; then + run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" + fi + done + done + + if [ ! -d "${NETDATA_PREFIX}/etc/netdata/node.d" ]; then + run mkdir "${NETDATA_PREFIX}/etc/netdata/node.d" + fi + + # move the node.d config files + for x in named sma_webbox snmp; do + for y in "" ".old" ".orig"; do + if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" -a ! -f "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}" ]; then + run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}" + fi + done + done fi # ----------------------------------------------------------------------------- @@ -504,72 +488,63 @@ fi md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" deleted_stock_configs=0 -if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ] -then - - progress "Backup existing netdata configuration before installing it" - - if [ "${BASH_VERSINFO[0]}" -ge "4" ] - then - declare -A configs_signatures=() - if [ -f "configs.signatures" ] - then - source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !" - fi - fi - - config_signature_matches() { - local md5="${1}" file="${2}" - - if [ "${BASH_VERSINFO[0]}" -ge "4" ] - then - [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0 - return 1 - fi - - if [ -f "configs.signatures" ] - then - grep "\['${md5}'\]='${file}'" "configs.signatures" >/dev/null - return $? - fi - - return 1 - } - - # clean up stock config files from the user configuration directory - for x in $(find -L "${NETDATA_PREFIX}/etc/netdata" -type f) - do - if [ -f "${x}" ] - then - # find it relative filename - f="${x/${NETDATA_PREFIX}\/etc\/netdata\//}" - - # find the stock filename - t="${f/.conf.installer_backup.*/.conf}" - t="${t/.conf.old/.conf}" - t="${t/.conf.orig/.conf}" - - if [ -z "${md5sum}" -o ! -x "${md5sum}" ] - then - # we don't have md5sum - keep it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}is not known to distribution${TPUT_RESET}. Keeping it." - else - # find its checksum - md5="$(${md5sum} <"${x}" | cut -d ' ' -f 1)" - - if config_signature_matches "${md5}" "${t}" - then - # it is a stock version - remove it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version of '${t}'." 
- run rm -f "${x}" - deleted_stock_configs=$(( deleted_stock_configs + 1 )) - else - # edited by user - keep it - echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} does not match stock of '${t}'${TPUT_RESET}. Keeping it." - fi - fi - fi - done +if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ]; then + + progress "Backup existing netdata configuration before installing it" + + if [ "${BASH_VERSINFO[0]}" -ge "4" ]; then + declare -A configs_signatures=() + if [ -f "configs.signatures" ]; then + source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !" + fi + fi + + config_signature_matches() { + local md5="${1}" file="${2}" + + if [ "${BASH_VERSINFO[0]}" -ge "4" ]; then + [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0 + return 1 + fi + + if [ -f "configs.signatures" ]; then + grep "\['${md5}'\]='${file}'" "configs.signatures" >/dev/null + return $? + fi + + return 1 + } + + # clean up stock config files from the user configuration directory + for x in $(find -L "${NETDATA_PREFIX}/etc/netdata" -type f); do + if [ -f "${x}" ]; then + # find it relative filename + f="${x/${NETDATA_PREFIX}\/etc\/netdata\//}" + + # find the stock filename + t="${f/.conf.installer_backup.*/.conf}" + t="${t/.conf.old/.conf}" + t="${t/.conf.orig/.conf}" + + if [ -z "${md5sum}" -o ! -x "${md5sum}" ]; then + # we don't have md5sum - keep it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}is not known to distribution${TPUT_RESET}. Keeping it." + else + # find its checksum + md5="$(${md5sum} <"${x}" | cut -d ' ' -f 1)" + + if config_signature_matches "${md5}" "${t}"; then + # it is a stock version - remove it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version of '${t}'." + run rm -f "${x}" + deleted_stock_configs=$((deleted_stock_configs + 1)) + else + # edited by user - keep it + echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} does not match stock of '${t}'${TPUT_RESET}. Keeping it." + fi + fi + fi + done fi touch "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" @@ -578,13 +553,11 @@ progress "Install netdata" run make install || exit 1 - # ----------------------------------------------------------------------------- progress "Fix generated files permissions" run find ./system/ -type f -a \! -name \*.in -a \! -name Makefile\* -a \! -name \*.conf -a \! -name \*.service -a \! -name \*.logrotate -exec chmod 755 {} \; - # ----------------------------------------------------------------------------- progress "Add user netdata to required user groups" @@ -592,68 +565,63 @@ homedir="${NETDATA_PREFIX}/var/lib/netdata" [ ! -z "${NETDATA_PREFIX}" ] && homedir="${NETDATA_PREFIX}" add_netdata_user_and_group "${homedir}" || run_failed "The installer does not run as root." - # ----------------------------------------------------------------------------- progress "Install logrotate configuration for netdata" install_netdata_logrotate - # ----------------------------------------------------------------------------- progress "Read installation options from netdata.conf" # create an empty config if it does not exist -[ ! -f "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ] && \ - touch "${NETDATA_PREFIX}/etc/netdata/netdata.conf" +[ ! 
-f "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ] && + touch "${NETDATA_PREFIX}/etc/netdata/netdata.conf" # function to extract values from the config file config_option() { - local section="${1}" key="${2}" value="${3}" - - if [ -s "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ] - then - "${NETDATA_PREFIX}/usr/sbin/netdata" \ - -c "${NETDATA_PREFIX}/etc/netdata/netdata.conf" \ - -W get "${section}" "${key}" "${value}" || \ - echo "${value}" - else - echo "${value}" - fi + local section="${1}" key="${2}" value="${3}" + + if [ -s "${NETDATA_PREFIX}/etc/netdata/netdata.conf" ]; then + "${NETDATA_PREFIX}/usr/sbin/netdata" \ + -c "${NETDATA_PREFIX}/etc/netdata/netdata.conf" \ + -W get "${section}" "${key}" "${value}" || + echo "${value}" + else + echo "${value}" + fi } # the user netdata will run as -if [ "${UID}" = "0" ] - then - NETDATA_USER="$( config_option "global" "run as user" "netdata" )" - ROOT_USER="root" +if [ "${UID}" = "0" ]; then + NETDATA_USER="$(config_option "global" "run as user" "netdata")" + ROOT_USER="root" else - NETDATA_USER="${USER}" - ROOT_USER="${NETDATA_USER}" + NETDATA_USER="${USER}" + ROOT_USER="${NETDATA_USER}" fi NETDATA_GROUP="$(id -g -n ${NETDATA_USER})" [ -z "${NETDATA_GROUP}" ] && NETDATA_GROUP="${NETDATA_USER}" # the owners of the web files -NETDATA_WEB_USER="$( config_option "web" "web files owner" "${NETDATA_USER}" )" +NETDATA_WEB_USER="$(config_option "web" "web files owner" "${NETDATA_USER}")" NETDATA_WEB_GROUP="${NETDATA_GROUP}" -if [ "${UID}" = "0" -a "${NETDATA_USER}" != "${NETDATA_WEB_USER}" ] -then - NETDATA_WEB_GROUP="$(id -g -n ${NETDATA_WEB_USER})" - [ -z "${NETDATA_WEB_GROUP}" ] && NETDATA_WEB_GROUP="${NETDATA_WEB_USER}" +if [ "${UID}" = "0" -a "${NETDATA_USER}" != "${NETDATA_WEB_USER}" ]; then + NETDATA_WEB_GROUP="$(id -g -n ${NETDATA_WEB_USER})" + [ -z "${NETDATA_WEB_GROUP}" ] && NETDATA_WEB_GROUP="${NETDATA_WEB_USER}" fi -NETDATA_WEB_GROUP="$( config_option "web" "web files group" "${NETDATA_WEB_GROUP}" )" +NETDATA_WEB_GROUP="$(config_option "web" "web files group" "${NETDATA_WEB_GROUP}")" # port defport=19999 -NETDATA_PORT="$( config_option "web" "default port" ${defport} )" +NETDATA_PORT="$(config_option "web" "default port" ${defport})" # directories -NETDATA_LIB_DIR="$( config_option "global" "lib directory" "${NETDATA_PREFIX}/var/lib/netdata" )" -NETDATA_CACHE_DIR="$( config_option "global" "cache directory" "${NETDATA_PREFIX}/var/cache/netdata" )" -NETDATA_WEB_DIR="$( config_option "global" "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web" )" -NETDATA_LOG_DIR="$( config_option "global" "log directory" "${NETDATA_PREFIX}/var/log/netdata" )" -NETDATA_USER_CONFIG_DIR="$( config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata" )" -NETDATA_STOCK_CONFIG_DIR="$( config_option "global" "stock config directory" "${NETDATA_PREFIX}/usr/lib/netdata/conf.d" )" +NETDATA_LIB_DIR="$(config_option "global" "lib directory" "${NETDATA_PREFIX}/var/lib/netdata")" +NETDATA_CACHE_DIR="$(config_option "global" "cache directory" "${NETDATA_PREFIX}/var/cache/netdata")" +NETDATA_WEB_DIR="$(config_option "global" "web files directory" "${NETDATA_PREFIX}/usr/share/netdata/web")" +NETDATA_LOG_DIR="$(config_option "global" "log directory" "${NETDATA_PREFIX}/var/log/netdata")" +NETDATA_USER_CONFIG_DIR="$(config_option "global" "config directory" "${NETDATA_PREFIX}/etc/netdata")" +NETDATA_STOCK_CONFIG_DIR="$(config_option "global" "stock config directory" "${NETDATA_PREFIX}/usr/lib/netdata/conf.d")" 
NETDATA_RUN_DIR="${NETDATA_PREFIX}/var/run" cat <&2 "Creating directory '${NETDATA_USER_CONFIG_DIR}/${x}'" - run mkdir -p "${NETDATA_USER_CONFIG_DIR}/${x}" || exit 1 - fi +for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" "go.d"; do + if [ ! -d "${NETDATA_USER_CONFIG_DIR}/${x}" ]; then + echo >&2 "Creating directory '${NETDATA_USER_CONFIG_DIR}/${x}'" + run mkdir -p "${NETDATA_USER_CONFIG_DIR}/${x}" || exit 1 + fi done run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_USER_CONFIG_DIR}" run find "${NETDATA_USER_CONFIG_DIR}" -type f -exec chmod 0640 {} \; @@ -709,13 +674,11 @@ run chmod 755 "${NETDATA_USER_CONFIG_DIR}/edit-config" helplink="000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" [ ${deleted_stock_configs} -eq 0 ] && helplink="" -for link in "orig" "${helplink}" -do - if [ ! -z "${link}" ] - then - [ -L "${NETDATA_USER_CONFIG_DIR}/${link}" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/${link}" - run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/${link}" - fi +for link in "orig" "${helplink}"; do + if [ ! -z "${link}" ]; then + [ -L "${NETDATA_USER_CONFIG_DIR}/${link}" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/${link}" + run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/${link}" + fi done run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_STOCK_CONFIG_DIR}" run find "${NETDATA_STOCK_CONFIG_DIR}" -type f -exec chmod 0640 {} \; @@ -723,10 +686,9 @@ run find "${NETDATA_STOCK_CONFIG_DIR}" -type d -exec chmod 0755 {} \; # --- web dir ---- -if [ ! -d "${NETDATA_WEB_DIR}" ] - then - echo >&2 "Creating directory '${NETDATA_WEB_DIR}'" - run mkdir -p "${NETDATA_WEB_DIR}" || exit 1 +if [ ! -d "${NETDATA_WEB_DIR}" ]; then + echo >&2 "Creating directory '${NETDATA_WEB_DIR}'" + run mkdir -p "${NETDATA_WEB_DIR}" || exit 1 fi run chown -R "${NETDATA_WEB_USER}:${NETDATA_WEB_GROUP}" "${NETDATA_WEB_DIR}" run find "${NETDATA_WEB_DIR}" -type f -exec chmod 0664 {} \; @@ -734,101 +696,146 @@ run find "${NETDATA_WEB_DIR}" -type d -exec chmod 0775 {} \; # --- data dirs ---- -for x in "${NETDATA_LIB_DIR}" "${NETDATA_CACHE_DIR}" "${NETDATA_LOG_DIR}" -do - if [ ! -d "${x}" ] - then - echo >&2 "Creating directory '${x}'" - run mkdir -p "${x}" || exit 1 - fi - - run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${x}" - #run find "${x}" -type f -exec chmod 0660 {} \; - #run find "${x}" -type d -exec chmod 0770 {} \; +for x in "${NETDATA_LIB_DIR}" "${NETDATA_CACHE_DIR}" "${NETDATA_LOG_DIR}"; do + if [ ! 
-d "${x}" ]; then + echo >&2 "Creating directory '${x}'" + run mkdir -p "${x}" || exit 1 + fi + + run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${x}" + #run find "${x}" -type f -exec chmod 0660 {} \; + #run find "${x}" -type d -exec chmod 0770 {} \; done run chmod 755 "${NETDATA_LOG_DIR}" # --- plugins ---- -if [ ${UID} -eq 0 ] - then - # find the admin group - admin_group= - test -z "${admin_group}" && getent group root >/dev/null 2>&1 && admin_group="root" - test -z "${admin_group}" && getent group daemon >/dev/null 2>&1 && admin_group="daemon" - test -z "${admin_group}" && admin_group="${NETDATA_GROUP}" - - run chown "${NETDATA_USER}:${admin_group}" "${NETDATA_LOG_DIR}" - run chown -R root "${NETDATA_PREFIX}/usr/libexec/netdata" - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \; - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0644 {} \; - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.plugin -exec chmod 0755 {} \; - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.sh -exec chmod 0755 {} \; - - if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" ] - then - setcap_ret=1 - if ! iscontainer - then - if [ ! -z "${setcap}" ] - then - run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" - run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" - run setcap cap_dac_read_search,cap_sys_ptrace+ep "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" - setcap_ret=$? - fi - - if [ ${setcap_ret} -eq 0 ] - then - # if we managed to setcap - # but we fail to execute apps.plugin - # trigger setuid to root - "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" -t >/dev/null 2>&1 - setcap_ret=$? 
- fi - fi - - if [ ${setcap_ret} -ne 0 ] - then - # fix apps.plugin to be setuid to root - run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" - run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" - fi - fi - - if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" ] - then - run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" - run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" - fi - - if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" ] - then - run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" - run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" - fi - - if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" ] - then - run chown root "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" - run chmod 0550 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" - fi +if [ ${UID} -eq 0 ]; then + # find the admin group + admin_group= + test -z "${admin_group}" && getent group root >/dev/null 2>&1 && admin_group="root" + test -z "${admin_group}" && getent group daemon >/dev/null 2>&1 && admin_group="daemon" + test -z "${admin_group}" && admin_group="${NETDATA_GROUP}" + + run chown "${NETDATA_USER}:${admin_group}" "${NETDATA_LOG_DIR}" + run chown -R root "${NETDATA_PREFIX}/usr/libexec/netdata" + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \; + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0644 {} \; + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.plugin -exec chmod 0755 {} \; + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -a -name \*.sh -exec chmod 0755 {} \; + + if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" ]; then + setcap_ret=1 + if ! iscontainer; then + if [ ! -z "${setcap}" ]; then + run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" + run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" + run setcap cap_dac_read_search,cap_sys_ptrace+ep "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" + setcap_ret=$? + fi + + if [ ${setcap_ret} -eq 0 ]; then + # if we managed to setcap + # but we fail to execute apps.plugin + # trigger setuid to root + "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" -t >/dev/null 2>&1 + setcap_ret=$? 
+ fi + fi + + if [ ${setcap_ret} -ne 0 ]; then + # fix apps.plugin to be setuid to root + run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" + run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/apps.plugin" + fi + fi + + if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" ]; then + run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" + run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" + fi + + if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" ]; then + run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" + run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network" + fi + + if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" ]; then + run chown root "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" + run chmod 0550 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/cgroup-network-helper.sh" + fi else - # non-privileged user installation - run chown "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_LOG_DIR}" - run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata" - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0755 {} \; - run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \; + # non-privileged user installation + run chown "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_LOG_DIR}" + run chown -R "${NETDATA_USER}:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata" + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type f -exec chmod 0755 {} \; + run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \; fi -# --- fix #1292 bug --- +# ----------------------------------------------------------------------------- -[ -d "${NETDATA_PREFIX}/usr/libexec" ] && run chmod a+rX "${NETDATA_PREFIX}/usr/libexec" -[ -d "${NETDATA_PREFIX}/usr/share/netdata" ] && run chmod a+rX "${NETDATA_PREFIX}/usr/share/netdata" +install_go() { + # When updating this value, ensure correct checksums in packaging/go.d.checksums + GO_PACKAGE_VERSION="v0.0.2" + ARCH_MAP=( + 'i386::386' + 'i686::386' + 'x86_64::amd64' + 'aarch64::arm64' + 'armv64::arm64' + 'armv6l::arm' + 'armv7l::arm' + 'armv5tel::arm' + ) + + if [ -z "${NETDATA_DISABLE_GO+x}" ]; then + progress "Install go.d.plugin" + ARCH=$(uname -m) + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + + for index in "${ARCH_MAP[@]}" ; do + KEY="${index%%::*}" + VALUE="${index##*::}" + if [ "$KEY" == "$ARCH" ]; then + ARCH="${VALUE}" + break + fi + done + tmp=$(mktemp -d /tmp/netdata-go-XXXXXX) + GO_PACKAGE_BASENAME="go.d.plugin-$GO_PACKAGE_VERSION.$OS-$ARCH" + download "https://github.com/netdata/go.d.plugin/releases/download/$GO_PACKAGE_VERSION/$GO_PACKAGE_BASENAME" "${tmp}/$GO_PACKAGE_BASENAME" + download "https://github.com/netdata/go.d.plugin/releases/download/$GO_PACKAGE_VERSION/config.tar.gz" "${tmp}/config.tar.gz" + grep "${GO_PACKAGE_BASENAME}" "${installer_dir}/packaging/go.d.checksums" > "${tmp}/sha256sums.txt" 2>/dev/null + grep "config.tar.gz" "${installer_dir}/packaging/go.d.checksums" >> "${tmp}/sha256sums.txt" 2>/dev/null + + # Checksum validation + if ! (cd "${tmp}" && sha256sum -c "sha256sums.txt"); then + run_failed "go.d.plugin package files checksum validation failed." 
+ return 1 + fi + + # Install new files + run rm -rf "${NETDATA_STOCK_CONFIG_DIR}/go.d" + run rm -rf "${NETDATA_STOCK_CONFIG_DIR}/go.d.conf" + run tar -xf "${tmp}/config.tar.gz" -C "${NETDATA_STOCK_CONFIG_DIR}/" + run chown -R "${ROOT_USER}:${NETDATA_GROUP}" "${NETDATA_STOCK_CONFIG_DIR}" + + run mv "${tmp}/$GO_PACKAGE_BASENAME" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" + if [ ${UID} -eq 0 ]; then + run chown root:${NETDATA_GROUP} "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" + fi + run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" + fi + return 0 +} +install_go +# --- fix #1292 bug --- +[ -d "${NETDATA_PREFIX}/usr/libexec" ] && run chmod a+rX "${NETDATA_PREFIX}/usr/libexec" +[ -d "${NETDATA_PREFIX}/usr/share/netdata" ] && run chmod a+rX "${NETDATA_PREFIX}/usr/share/netdata" # ----------------------------------------------------------------------------- progress "Install netdata at system init" @@ -836,42 +843,38 @@ progress "Install netdata at system init" NETDATA_START_CMD="${NETDATA_PREFIX}/usr/sbin/netdata" install_netdata_service || run_failed "Cannot install netdata init service." - # ----------------------------------------------------------------------------- # check if we can re-start netdata started=0 -if [ ${DONOTSTART} -eq 1 ] - then - generate_netdata_conf "${NETDATA_USER}" "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:${NETDATA_PORT}/netdata.conf" +if [ ${DONOTSTART} -eq 1 ]; then + generate_netdata_conf "${NETDATA_USER}" "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:${NETDATA_PORT}/netdata.conf" else - restart_netdata ${NETDATA_PREFIX}/usr/sbin/netdata "${@}" - if [ $? -ne 0 ] - then - echo >&2 - echo >&2 "SORRY! FAILED TO START NETDATA!" - echo >&2 - exit 1 - fi - - started=1 - echo >&2 "OK. NetData Started!" - echo >&2 - - # ----------------------------------------------------------------------------- - # save a config file, if it is not already there - - download_netdata_conf "${NETDATA_USER}" "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:${NETDATA_PORT}/netdata.conf" + restart_netdata ${NETDATA_PREFIX}/usr/sbin/netdata "${@}" + if [ $? -ne 0 ]; then + echo >&2 + echo >&2 "SORRY! FAILED TO START NETDATA!" + echo >&2 + exit 1 + fi + + started=1 + echo >&2 "OK. NetData Started!" + echo >&2 + + # ----------------------------------------------------------------------------- + # save a config file, if it is not already there + + download_netdata_conf "${NETDATA_USER}" "${NETDATA_PREFIX}/etc/netdata/netdata.conf" "http://localhost:${NETDATA_PORT}/netdata.conf" fi -if [ "$(uname)" = "Linux" ] -then - # ------------------------------------------------------------------------- - progress "Check KSM (kernel memory deduper)" +if [ "$(uname)" = "Linux" ]; then + # ------------------------------------------------------------------------- + progress "Check KSM (kernel memory deduper)" - ksm_is_available_but_disabled() { - cat <netdata-uninstaller.sh <&2 "This script will REMOVE netdata from your system." - echo >&2 "Run it again with --force to do it." - exit 1 -fi - -source installer/functions.sh || exit 1 - -echo >&2 "Stopping a possibly running netdata..." -for p in \$(pidof netdata); do run kill \$p; done -sleep 2 - -if [ ! 
-z "${NETDATA_PREFIX}" -a -d "${NETDATA_PREFIX}" ] - then - # installation prefix was given - - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}" - -else - # installation prefix was NOT given - - if [ -f "${NETDATA_PREFIX}/usr/sbin/netdata" ] - then - echo "Deleting ${NETDATA_PREFIX}/usr/sbin/netdata ..." - run rm -i "${NETDATA_PREFIX}/usr/sbin/netdata" - fi - - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/etc/netdata" - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/usr/share/netdata" - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/usr/libexec/netdata" - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/lib/netdata" - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/cache/netdata" - portable_deletedir_recursively_interactively "${NETDATA_PREFIX}/var/log/netdata" -fi - -if [ -f /etc/logrotate.d/netdata ] - then - echo "Deleting /etc/logrotate.d/netdata ..." - run rm -i /etc/logrotate.d/netdata -fi - -if [ -f /etc/systemd/system/netdata.service ] - then - echo "Deleting /etc/systemd/system/netdata.service ..." - run rm -i /etc/systemd/system/netdata.service -fi - -if [ -f /lib/systemd/system/netdata.service ] - then - echo "Deleting /lib/systemd/system/netdata.service ..." - run rm -i /lib/systemd/system/netdata.service -fi - -if [ -f /etc/init.d/netdata ] - then - echo "Deleting /etc/init.d/netdata ..." - run rm -i /etc/init.d/netdata -fi - -if [ -f /etc/periodic/daily/netdata-updater ] - then - echo "Deleting /etc/periodic/daily/netdata-updater ..." - run rm -i /etc/periodic/daily/netdata-updater -fi - -if [ -f /etc/cron.daily/netdata-updater ] - then - echo "Deleting /etc/cron.daily/netdata-updater ..." - run rm -i /etc/cron.daily/netdata-updater -fi - -portable_check_user_exists netdata -if [ \$? -eq 0 ] - then - echo - echo "You may also want to remove the user netdata" - echo "by running:" - echo " userdel netdata" -fi - -portable_check_group_exists netdata > /dev/null -if [ \$? -eq 0 ] - then - echo - echo "You may also want to remove the group netdata" - echo "by running:" - echo " groupdel netdata" + fi fi -for g in ${NETDATA_ADDED_TO_GROUPS} -do - portable_check_group_exists \$g > /dev/null - if [ \$? -eq 0 ] - then - echo - echo "You may also want to remove the netdata user from the \$g group" - echo "by running:" - echo " gpasswd -d netdata \$g" - fi -done - -UNINSTALL -chmod 750 netdata-uninstaller.sh - # ----------------------------------------------------------------------------- progress "Basic netdata instructions" @@ -1102,186 +986,77 @@ To start netdata run: END -echo >&2 "Uninstall script generated: ${TPUT_RED}${TPUT_BOLD}./netdata-uninstaller.sh${TPUT_RESET}" - -if [ -d .git ] - then - cat >netdata-updater.sh.new <&2 "This script should be run as user with uid \${INSTALL_UID} but it now runs with uid \${UID}" - exit 1 -fi - -# make sure we cd to the working directory -cd "${REINSTALL_PWD}" || exit 1 -# make sure there is .git here -[ \${force} -eq 0 -a ! -d .git ] && echo >&2 "No git structures found at: ${REINSTALL_PWD} (use -f for force re-install)" && exit 1 - -# signal netdata to start saving its database -# this is handy if your database is big -pids=\$(pidof netdata) -do_not_start= -if [ ! 
-z "\${pids}" ] - then - kill -USR1 \${pids} -else - # netdata is currently not running, so do not start it after updating - do_not_start="--dont-start-it" +if [ "${AUTOUPDATE}" = "1" ]; then + if [ "${UID}" -ne "0" ]; then + echo >&2 "You need to run the installer as root for auto-updating via cron." + else + crondir= + [ -d "/etc/periodic/daily" ] && crondir="/etc/periodic/daily" + [ -d "/etc/cron.daily" ] && crondir="/etc/cron.daily" + + if [ -z "${crondir}" ]; then + echo >&2 "Cannot figure out the cron directory to install netdata-updater" + else + if [ -f "${crondir}/netdata-updater.sh" ]; then + progress "Removing incorrect netdata-updater filename in cron" + rm -f "${crondir}/netdata-updater.sh" + fi + progress "Installing new netdata-updater in cron" + + rm ${installer_dir}/netdata-updater.sh || : #TODO(paulfantom): this workaround should be removed after v1.13.0-rc1. It just needs to be propagated + + rm -f "${crondir}/netdata-updater" + if [ -f "${installer_dir}/packaging/installer/netdata-updater.sh" ]; then + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${installer_dir}/packaging/installer/netdata-updater.sh" > ${crondir}/netdata-updater || exit 1 + #TODO(paulfantom): Following line is a workaround and should be removed after v1.13.0-rc1. It just needs time to be propagated. + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${installer_dir}/packaging/installer/netdata-updater.sh" > ${installer_dir}/netdata-updater.sh || exit 1 + else + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${netdata_source_dir}/packaging/installer/netdata-updater.sh" > ${crondir}/netdata-updater || exit 1 + #TODO(paulfantom): Following line is a workaround and should be removed after v1.13.0-rc1. It just needs time to be propagated. + sed "s|THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT|${NETDATA_USER_CONFIG_DIR}/.environment|" "${netdata_source_dir}/packaging/installer/netdata-updater.sh" > ${installer_source_dir}/netdata-updater.sh || exit 1 + fi + + chmod 0755 ${crondir}/netdata-updater + echo >&2 "Update script is located at ${TPUT_GREEN}${TPUT_BOLD}${crondir}/netdata-updater${TPUT_RESET}" + echo >&2 + echo >&2 "${TPUT_DIM}${TPUT_BOLD}netdata-updater${TPUT_RESET}${TPUT_DIM} works from cron. It will trigger an email from cron" + echo >&2 "only if it fails (it should not print anything when it can update netdata).${TPUT_RESET}" + fi + fi fi -tmp= -if [ -t 2 ] - then - # we are running on a terminal - # open fd 3 and send it to stderr - exec 3>&2 -else - # we are headless - # create a temporary file for the log - tmp=\$(mktemp /tmp/netdata-updater.log.XXXXXX) - # open fd 3 and send it to tmp - exec 3>\${tmp} -fi - -info() { - echo >&3 "\$(date) : INFO: " "\${@}" -} - -emptyline() { - echo >&3 -} - -error() { - echo >&3 "\$(date) : ERROR: " "\${@}" -} - -# this is what we will do if it fails (head-less only) -failed() { - error "FAILED TO UPDATE NETDATA : \${1}" - - if [ ! -z "\${tmp}" ] - then - cat >&2 "\${tmp}" - rm "\${tmp}" - fi - exit 1 -} - -get_latest_commit_id() { - git rev-parse HEAD 2>&3 -} - -update() { - [ -z "\${tmp}" ] && info "Running on a terminal - (this script also supports running headless from crontab)" - - emptyline - - if [ -d .git ] - then - info "Updating netdata source from github..." 
- - last_commit="\$(get_latest_commit_id)" - [ \${force} -eq 0 -a -z "\${last_commit}" ] && failed "CANNOT GET LAST COMMIT ID (use -f for force re-install)" - - git pull >&3 2>&3 || failed "CANNOT FETCH LATEST SOURCE (use -f for force re-install)" - - new_commit="\$(get_latest_commit_id)" - if [ \${force} -eq 0 ] - then - [ -z "\${new_commit}" ] && failed "CANNOT GET NEW LAST COMMIT ID (use -f for force re-install)" - [ "\${new_commit}" = "\${last_commit}" ] && info "Nothing to be done! (use -f to force re-install)" && exit 0 - fi - elif [ \${force} -eq 0 ] - then - failed "CANNOT FIND GIT STRUCTURES IN \$(pwd) (use -f for force re-install)" - fi - - emptyline - info "Re-installing netdata..." - ${REINSTALL_COMMAND} --dont-wait \${do_not_start} >&3 2>&3 || failed "FAILED TO COMPILE/INSTALL NETDATA" - - [ ! -z "\${tmp}" ] && rm "\${tmp}" && tmp= - return 0 -} +# Save environment variables +cat <<EOF > ${NETDATA_USER_CONFIG_DIR}/.environment +# Created by installer +PATH="${PATH}" +CFLAGS="${CFLAGS}" +NETDATA_PREFIX="${NETDATA_PREFIX}" +NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS}" +NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS}" +INSTALL_UID="${UID}" +REINSTALL_COMMAND="${REINSTALL_COMMAND}" +# next 3 values are meant to be populated by autoupdater (if enabled) +NETDATA_TARBALL_URL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.tar.gz" +NETDATA_TARBALL_CHECKSUM_URL="https://storage.googleapis.com/netdata-nightlies/sha256sums.txt" +NETDATA_TARBALL_CHECKSUM="new_installation" +EOF -# the installer updates this script - so we run and exit in a single line -update && exit 0 -############################################################################### -############################################################################### -REINSTALL - chmod 755 netdata-updater.sh.new - mv -f netdata-updater.sh.new netdata-updater.sh - echo >&2 "Update script generated : ${TPUT_GREEN}${TPUT_BOLD}./netdata-updater.sh${TPUT_RESET}" - echo >&2 - echo >&2 "${TPUT_DIM}${TPUT_BOLD}netdata-updater.sh${TPUT_RESET}${TPUT_DIM} can work from cron. It will trigger an email from cron" - echo >&2 "only if it fails (it does not print anything when it can update netdata).${TPUT_RESET}" - if [ "${UID}" -eq "0" ] - then - crondir= - [ -d "/etc/periodic/daily" ] && crondir="/etc/periodic/daily" - [ -d "/etc/cron.daily" ] && crondir="/etc/cron.daily" - - if [ ! -z "${crondir}" ] - then - if [ -f "${crondir}/netdata-updater.sh" -a ! -f "${crondir}/netdata-updater" ] - then - # remove .sh from the filename under cron - progress "Fixing netdata-updater filename at cron" - mv -f "${crondir}/netdata-updater.sh" "${crondir}/netdata-updater" - fi - - if [ ! -f "${crondir}/netdata-updater" ] - then - if [ "${AUTOUPDATE}" = "1" ] - then - progress "Installing netdata-updater at cron" - run ln -fs "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" - else - echo >&2 "${TPUT_DIM}Run this to automatically check and install netdata updates once per day:${TPUT_RESET}" - echo >&2 - echo >&2 "${TPUT_YELLOW}${TPUT_BOLD}sudo ln -fs ${PWD}/netdata-updater.sh ${crondir}/netdata-updater${TPUT_RESET}" - fi - else - progress "Refreshing netdata-updater at cron" - run rm "${crondir}/netdata-updater" - run ln -fs "${PWD}/netdata-updater.sh" "${crondir}/netdata-updater" - fi - else - [ "${AUTOUPDATE}" = "1" ] && echo >&2 "Cannot figure out the cron directory to install netdata-updater." - fi - else - [ "${AUTOUPDATE}" = "1" ] && echo >&2 "You need to run the installer as root for auto-updating via cron." 
- fi -else - [ -f "netdata-updater.sh" ] && rm "netdata-updater.sh" - [ "${AUTOUPDATE}" = "1" ] && echo >&2 "Your installation method does not support daily auto-updating via cron." +# Opt-out from telemetry program +if [ -n "${NETDATA_DISABLE_TELEMETRY+x}" ]; then + touch ${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics fi # ----------------------------------------------------------------------------- echo >&2 progress "We are done!" -if [ ${started} -eq 1 ] - then - netdata_banner "is installed and running now!" +if [ ${started} -eq 1 ]; then + netdata_banner "is installed and running now!" else - netdata_banner "is installed now!" + netdata_banner "is installed now!" fi echo >&2 " enjoy real-time performance and health monitoring..." -echo >&2 +echo >&2 exit 0 diff --git a/netdata.spec b/netdata.spec deleted file mode 100644 index 903b8a7f8..000000000 --- a/netdata.spec +++ /dev/null @@ -1,244 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -%global contentdir %{_datadir}/netdata - -# This is temporary and should eventually be resolved. This bypasses -# the default rhel __os_install_post which throws a python compile -# error. -%global __os_install_post %{nil} - -# -# Conditional build: -%bcond_without systemd # systemd -%bcond_with nfacct # build with nfacct plugin -%bcond_with freeipmi # build with freeipmi plugin -%bcond_with netns # build with netns support (cgroup-network) - -%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140 -%else -%undefine with_systemd -%undefine with_netns -%endif - -%if %{with systemd} -%if 0%{?suse_version} -%global netdata_initd_buildrequires \ -BuildRequires: systemd-rpm-macros \ -%{nil} -%global netdata_initd_requires \ -%{?systemd_requires} \ -%{nil} -%global netdata_init_post %service_add_post netdata.service -%global netdata_init_preun %service_del_preun netdata.service -%global netdata_init_postun %service_del_postun netdata.service -%else -%global netdata_initd_buildrequires \ -BuildRequires: systemd -%global netdata_initd_requires \ -Requires(preun): systemd-units \ -Requires(postun): systemd-units \ -Requires(post): systemd-units \ -%{nil} -%global netdata_init_post %systemd_post netdata.service -%global netdata_init_preun %systemd_preun netdata.service -%global netdata_init_postun %systemd_postun_with_restart netdata.service -%endif -%else -%global netdata_initd_buildrequires %{nil} -%global netdata_initd_requires \ -Requires(post): chkconfig \ -%{nil} -%global netdata_init_post \ -/sbin/chkconfig --add netdata \ -%{nil} -%global netdata_init_preun %{nil} \ -if [ $1 = 0 ]; then \ - /sbin/service netdata stop > /dev/null 2>&1 \ - /sbin/chkconfig --del netdata \ -fi \ -%{nil} -%global netdata_init_postun %{nil} \ -if [ $1 != 0 ]; then \ - /sbin/service netdata condrestart 2>&1 > /dev/null \ -fi \ -%{nil} -%endif - -%if 0%{?_fedora} -%global netdata_recommends \ -Recommends: curl \ -Recommends: iproute-tc \ -Recommends: lm_sensors \ -Recommends: nmap-ncat \ -Recommends: nodejs \ -Recommends: python \ -Recommends: PyYAML \ -Recommends: python2-PyMySQL \ -Recommends: python2-psycopg2 \ -%{nil} -%else -%global netdata_recommends %{nil} -%endif - -Summary: Real-time performance monitoring, done right -Name: netdata -Version: 1.11.0 -Release: 1%{?dist} -License: GPLv3+ -Group: Applications/System -Source0: https://github.com/netdata/%{name}/releases/download/v1.11.1_rolling/%{name}-1.11.1_rolling.tar.gz -URL: http://my-netdata.io -BuildRequires: pkgconfig -BuildRequires: xz -BuildRequires: zlib-devel -BuildRequires: 
libuuid-devel -Requires: zlib -Requires: libuuid - -# Packages can be found in the EPEL repo -%if %{with nfacct} -BuildRequires: libmnl-devel -BuildRequires: libnetfilter_acct-devel -Requires: libmnl -Requires: libnetfilter_acct -%endif - -%if %{with freeipmi} -BuildRequires: freeipmi-devel -Requires: freeipmi -%endif - -Requires(pre): /usr/sbin/groupadd -Requires(pre): /usr/sbin/useradd -Requires(post): libcap - -%{netdata_initd_buildrequires} -%{netdata_recommends} -%{netdata_initd_requires} - -%description -netdata is the fastest way to visualize metrics. It is a resource -efficient, highly optimized system for collecting and visualizing any -type of realtime timeseries data, from CPU usage, disk activity, SQL -queries, API calls, web site visitors, etc. - -netdata tries to visualize the truth of now, in its greatest detail, -so that you can get insights of what is happening now and what just -happened, on your systems and applications. - -%prep -%setup -q -n netdata-1.11.1_rolling - -%build -%configure \ - --with-zlib \ - --with-math \ - %{?with_nfacct:--enable-plugin-nfacct} \ - %{?with_freeipmi:--enable-plugin-freeipmi} \ - --with-user=netdata -%{__make} %{?_smp_mflags} - -%install -rm -rf "${RPM_BUILD_ROOT}" -%{__make} %{?_smp_mflags} DESTDIR="${RPM_BUILD_ROOT}" install - -find "${RPM_BUILD_ROOT}" -name .keep -delete - -install -m 644 -p system/netdata.conf "${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}" -install -m 755 -d "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d" -install -m 644 -p system/netdata.logrotate "${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}" - -%if %{with systemd} -install -m 755 -d "${RPM_BUILD_ROOT}%{_unitdir}" -install -m 644 -p system/netdata.service "${RPM_BUILD_ROOT}%{_unitdir}/netdata.service" -%else -# install SYSV init stuff -install -d "${RPM_BUILD_ROOT}/etc/rc.d/init.d" -install -m 755 system/netdata-init-d \ - "${RPM_BUILD_ROOT}/etc/rc.d/init.d/netdata" -%endif - -%pre -getent group netdata >/dev/null || groupadd -r netdata -getent group docker >/dev/null || groupadd -r docker -getent passwd netdata >/dev/null || \ - useradd -r -g netdata -G docker -s /sbin/nologin \ - -d %{contentdir} -c "netdata" netdata - -%post -%{netdata_init_post} - -%preun -%{netdata_init_preun} - -%postun -%{netdata_init_postun} - -%clean -rm -rf "${RPM_BUILD_ROOT}" - -%files -%doc README.md -%defattr(-,root,root) - -%dir %{_sysconfdir}/%{name} -%dir %{_libdir}/%{name} - -%config %{_sysconfdir}/%{name}/*.conf -#%config %{_sysconfdir}/%{name}/charts.d/*.conf -#%config %{_sysconfdir}/%{name}/health.d/*.conf -#%config %{_sysconfdir}/%{name}/node.d/*.conf -#%config %{_sysconfdir}/%{name}/python.d/*.conf -#%config %{_sysconfdir}/%{name}/statsd.d/*.conf -%config %{_sysconfdir}/logrotate.d/%{name} - -%{_libdir}/%{name} -%{_libexecdir}/%{name} -%{_sbindir}/%{name} -%{_sysconfdir}/%{name}/edit-config - -%caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0550,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin - -%if %{with netns} -# cgroup-network detects the network interfaces of CGROUPs -# it must be able to use setns() and run cgroup-network-helper.sh as root -# the helper script reads /proc/PID/fdinfo/* files, runs virsh, etc. 
-%caps(cap_setuid=ep) %attr(4550,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network -%attr(0550,root,root) %{_libexecdir}/%{name}/plugins.d/cgroup-network-helper.sh -%endif - -%if %{with freeipmi} -%caps(cap_setuid=ep) %attr(4550,root,netdata) %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin -%endif - -%attr(0770,netdata,netdata) %dir %{_localstatedir}/cache/%{name} -%attr(0770,netdata,netdata) %dir %{_localstatedir}/log/%{name} -%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name} - -%dir %{_datadir}/%{name} - -%dir %{_sysconfdir}/%{name}/health.d -%dir %{_sysconfdir}/%{name}/python.d -%dir %{_sysconfdir}/%{name}/charts.d -%dir %{_sysconfdir}/%{name}/node.d -%dir %{_sysconfdir}/%{name}/statsd.d - -%dir %{_libdir}/%{name}/conf.d/health.d -%dir %{_libdir}/%{name}/conf.d/python.d -%dir %{_libdir}/%{name}/conf.d/charts.d -%dir %{_libdir}/%{name}/conf.d/node.d -%dir %{_libdir}/%{name}/conf.d/statsd.d - -%if %{with systemd} -%{_unitdir}/netdata.service -%else -%{_sysconfdir}/rc.d/init.d/netdata -%endif - -# Enforce 0644 for files and 0755 for directories -# for the netdata web directory -%defattr(0644,root,netdata,0755) -%{_datadir}/%{name}/web - -%changelog -* Sun Nov 15 2015 Alon Bar-Lev - 0.0.0-1 -- Initial add. diff --git a/netdata.spec.in b/netdata.spec.in index b54a91e9f..5db24bd72 100644 --- a/netdata.spec.in +++ b/netdata.spec.in @@ -80,8 +80,8 @@ Recommends: python2-psycopg2 \ %endif Summary: Real-time performance monitoring, done right -Name: @PACKAGE_NAME@ -Version: @PACKAGE_RPM_VERSION@ +Name: netdata +Version: 1.12.0 Release: 1%{?dist} License: GPLv3+ Group: Applications/System @@ -129,6 +129,7 @@ happened, on your systems and applications. %setup -q -n @PACKAGE_NAME@-@PACKAGE_VERSION@ %build +autoreconf -i %configure \ --with-zlib \ --with-math \ @@ -211,7 +212,7 @@ rm -rf "${RPM_BUILD_ROOT}" %endif %attr(0770,netdata,netdata) %dir %{_localstatedir}/cache/%{name} -%attr(0770,netdata,netdata) %dir %{_localstatedir}/log/%{name} +%attr(0755,netdata,root) %dir %{_localstatedir}/log/%{name} %attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name} %dir %{_datadir}/%{name} @@ -225,7 +226,7 @@ rm -rf "${RPM_BUILD_ROOT}" %dir %{_libdir}/%{name}/conf.d/health.d %dir %{_libdir}/%{name}/conf.d/python.d %dir %{_libdir}/%{name}/conf.d/charts.d -%dir %{_libdir}/%{name}/conf.d/node.d +#%dir %{_libdir}/%{name}/conf.d/node.d %dir %{_libdir}/%{name}/conf.d/statsd.d @@ -240,5 +241,12 @@ rm -rf "${RPM_BUILD_ROOT}" %{_datadir}/%{name}/web %changelog +* Wed Jan 02 2019 Pawel Krupa - 0.0.0-3 +- Temporarily set version statically +- Fix changelog ordering +- Comment out node.d configuration directory +* Wed Jan 02 2019 Pawel Krupa - 0.0.0-2 +- Fix permissions for log files * Sun Nov 15 2015 Alon Bar-Lev - 0.0.0-1 - Initial add. + diff --git a/netlify.toml b/netlify.toml new file mode 100644 index 000000000..927941240 --- /dev/null +++ b/netlify.toml @@ -0,0 +1,12 @@ +# Settings in the [build] context are global and are applied to all contexts +# unless otherwise overridden by more specific contexts. +[build] + # Directory to change to before starting a build. + base = "/docs/generator" + + # Directory (relative to root of your repo) that contains the deploy-ready + # HTML files and assets generated by the build. + publish = "docs/generator/build" + + # Default build command. 
+ command = "./buildhtml.sh" diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile new file mode 100644 index 000000000..73cd9030f --- /dev/null +++ b/packaging/docker/Dockerfile @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# author : paulfantom + +# Cross-arch building is achieved by specifying ARCH as a build parameter with `--build-arg` option. +# It is automated in `build.sh` script +ARG ARCH=amd64-v3.8 +FROM multiarch/alpine:${ARCH} as builder + +ARG OUTPUT="/dev/stdout" +# Install prerequisites +RUN apk --no-cache add alpine-sdk \ + autoconf \ + automake \ + bash \ + build-base \ + curl \ + jq \ + libmnl-dev \ + libuuid \ + lm_sensors \ + netcat-openbsd \ + nodejs \ + pkgconfig \ + py-mysqldb \ + py-psycopg2 \ + py-yaml \ + python \ + util-linux-dev \ + zlib-dev + +# Copy source +COPY . /opt/netdata.git +WORKDIR /opt/netdata.git + +# Install from source +RUN chmod +x netdata-installer.sh && \ + ./netdata-installer.sh --dont-wait --dont-start-it &>${OUTPUT} + +# files to one directory +RUN mkdir -p /app/usr/sbin/ \ + /app/usr/share \ + /app/usr/libexec \ + /app/usr/lib \ + /app/var/cache \ + /app/var/lib \ + /app/etc && \ + mv /usr/share/netdata /app/usr/share/ && \ + mv /usr/libexec/netdata /app/usr/libexec/ && \ + mv /usr/lib/netdata /app/usr/lib/ && \ + mv /var/cache/netdata /app/var/cache/ && \ + mv /var/lib/netdata /app/var/lib/ && \ + mv /etc/netdata /app/etc/ && \ + mv /usr/sbin/netdata /app/usr/sbin/ && \ + mv packaging/docker/run.sh /app/usr/sbin/ && \ + chmod +x /app/usr/sbin/run.sh + +##################################################################### +ARG ARCH +FROM multiarch/alpine:${ARCH} + +# Install some prerequisites +RUN apk --no-cache add curl \ + fping \ + jq \ + libuuid \ + lm_sensors \ + netcat-openbsd \ + nodejs \ + py-mysqldb \ + py-psycopg2 \ + py-yaml \ + python + +# Conditional subscribiton to Polyverse's Polymorphic Linux repositories +RUN if [ "$(uname -m)" == "x86_64" ]; then \ + curl https://sh.polyverse.io | sh -s install gcxce5byVQbtRz0iwfGkozZwy support+netdata@polyverse.io; \ + apk update; \ + apk upgrade --available --no-cache; \ + sed -in 's/^#//g' /etc/apk/repositories; \ + fi + + +# Copy files over +COPY --from=builder /app / + +# Configure system +ARG NETDATA_UID=201 +ARG NETDATA_GID=201 +RUN \ + # fping from alpine apk is on a different location. Moving it. 
+    mv /usr/sbin/fping /usr/local/bin/fping && \
+    chmod 4755 /usr/local/bin/fping && \
+    mkdir -p /var/log/netdata && \
+    # Add netdata user
+    addgroup -g ${NETDATA_GID} -S netdata && \
+    adduser -S -H -s /usr/sbin/nologin -u ${NETDATA_GID} -h /etc/netdata -G netdata netdata && \
+    # Apply the permissions as described in
+    # https://github.com/netdata/netdata/wiki/netdata-security#netdata-directories
+    chown -R root:netdata /etc/netdata && \
+    chown -R netdata:netdata /var/cache/netdata /var/lib/netdata /usr/share/netdata && \
+    chown -R root:netdata /usr/lib/netdata && \
+    chown -R root:netdata /usr/libexec/netdata/plugins.d/apps.plugin /usr/libexec/netdata/plugins.d/cgroup-network && \
+    chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network /usr/libexec/netdata/plugins.d/apps.plugin && \
+    chmod 0750 /var/lib/netdata /var/cache/netdata && \
+    # Link log files to stdout
+    ln -sf /dev/stdout /var/log/netdata/access.log && \
+    ln -sf /dev/stdout /var/log/netdata/debug.log && \
+    ln -sf /dev/stderr /var/log/netdata/error.log
+
+ENV NETDATA_PORT 19999
+EXPOSE $NETDATA_PORT
+
+ENTRYPOINT ["/usr/sbin/run.sh"]
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
new file mode 100644
index 000000000..dba0fa0e6
--- /dev/null
+++ b/packaging/docker/README.md
@@ -0,0 +1,126 @@
+# Install netdata with Docker
+
+> :warning: As of Sep 9th, 2018 we ship [new docker builds](https://github.com/netdata/netdata/pull/3995), running netdata in docker with an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, not a COMMAND directive. Please adapt your execution scripts accordingly. More information about ENTRYPOINT vs COMMAND is presented by goinbigdata [here](http://goinbigdata.com/docker-run-vs-cmd-vs-entrypoint/) and by the docker docs [here](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
+>
+> Also, the `latest` tag is now based on alpine, so **`alpine` is not updated any more**, and `armv7hf` is now replaced with `armhf` (to comply with https://github.com/multiarch naming), so **`armv7hf` is not updated** either.
+
+## Limitations
+
+Running netdata in a container to monitor the whole host can limit its capabilities. Some data is not accessible, or is not as detailed, as when running netdata on the host.
+
+## Package scrambling in runtime (x86_64 only)
+
+By default, on the x86_64 architecture, our docker images use Polyverse's Polymorphic Linux package scrambling. For increased security, you can enable rescrambling of packages during runtime. To do this, set the environment variable `RESCRAMBLE=true` when starting the netdata docker container.
+
+For more information, see the [Polyverse site](https://polyverse.io/how-it-works/).
+
+## Run netdata with the docker command
+
+Quickly start netdata with the docker command line.
+Netdata is then available at http://host:19999
+
+This is good for an internal network or to quickly analyse a host.
+
+```bash
+docker run -d --name=netdata \
+  -p 19999:19999 \
+  -v /proc:/host/proc:ro \
+  -v /sys:/host/sys:ro \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
+  --cap-add SYS_PTRACE \
+  --security-opt apparmor=unconfined \
+  netdata/netdata
+```
+
+The above can be converted to a docker-compose file for ease of management:
+
+```yaml
+version: '3'
+services:
+  netdata:
+    image: netdata/netdata
+    hostname: example.com # set to fqdn of host
+    ports:
+      - 19999:19999
+    cap_add:
+      - SYS_PTRACE
+    security_opt:
+      - apparmor:unconfined
+    volumes:
+      - /proc:/host/proc:ro
+      - /sys:/host/sys:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+```
+
+### Docker container names resolution
+
+If you want your container names resolved by netdata, it needs access to the docker group. To achieve that, just add the environment variable `PGID=999` to the netdata container, where `999` is the docker group id on your host. This number can be found by running:
+```bash
+grep docker /etc/group | cut -d ':' -f 3
+```
+
+### Pass command line options to Netdata
+
+Since we use an [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) directive, you can provide [netdata daemon command line options](https://docs.netdata.cloud/daemon/#command-line-options), such as the IP address netdata will be running on, using the [command instruction](https://docs.docker.com/engine/reference/builder/#cmd).
+
+## Install Netdata using Docker Compose with an SSL/TLS enabled HTTP proxy
+
+For a permanent installation on a public server, you should [secure the netdata instance](../../docs/netdata-security.md). This section contains an example of how to install netdata with an SSL reverse proxy and basic authentication.
+
+You can use the following docker-compose.yml and Caddyfile files to run netdata with docker. Replace the domains and the email address for [Letsencrypt](https://letsencrypt.org/) before starting.
+
+### Prerequisites
+* [Docker](https://docs.docker.com/install/#server)
+* [Docker Compose](https://docs.docker.com/compose/install/)
+* A domain configured in DNS, pointing to the host.
+
+### Caddyfile
+
+This file needs to be placed in /opt with the name `Caddyfile`. Here you customize your domain, and you need to provide your email address to obtain a Letsencrypt certificate. Certificate renewal will happen automatically and will be executed internally by the caddy server.
+
+```
+netdata.example.org {
+    proxy / netdata:19999
+    tls admin@example.org
+}
+```
+
+### docker-compose.yml
+
+After setting up the Caddyfile, run this with `docker-compose up -d` to have a fully functioning netdata setup behind an SSL-enabled HTTP reverse proxy.
+
+```yaml
+version: '3'
+volumes:
+  caddy:
+
+services:
+  caddy:
+    image: abiosoft/caddy
+    ports:
+      - 80:80
+      - 443:443
+    volumes:
+      - /opt/Caddyfile:/etc/Caddyfile
+      - caddy:/root/.caddy
+    environment:
+      ACME_AGREE: 'true'
+  netdata:
+    restart: always
+    hostname: netdata.example.org
+    image: netdata/netdata
+    cap_add:
+      - SYS_PTRACE
+    security_opt:
+      - apparmor:unconfined
+    volumes:
+      - /proc:/host/proc:ro
+      - /sys:/host/sys:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+```
+
+### Restrict access with basic auth
+
+You can restrict access by following the [official caddy guide](https://caddyserver.com/docs/basicauth) and adding the relevant lines to your Caddyfile, as in the sketch below.
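+For example, a minimal sketch that protects the whole site with the caddy v1 `basicauth` directive (the `monitor` username and its password are placeholders you should replace with your own credentials):
+
+```
+netdata.example.org {
+    proxy / netdata:19999
+    tls admin@example.org
+    # placeholder credentials - replace before use
+    basicauth / monitor replace-with-a-strong-password
+}
+```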
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fdocker%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/docker/build.sh b/packaging/docker/build.sh new file mode 100755 index 000000000..6958f05e8 --- /dev/null +++ b/packaging/docker/build.sh @@ -0,0 +1,83 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-3.0-or-later +# Author : Pawel Krupa (paulfantom) +# Cross-arch docker build helper script +# Needs docker in version >18.02 due to usage of manifests + +set -e + +if [ ! -f .gitignore ]; then + echo "Run as ./packaging/docker/$(basename "$0") from top level directory of git repository" + exit 1 +fi + +if [ "$1" == "" ]; then + VERSION=$(git tag --points-at) +else + VERSION="$1" +fi +if [ "${VERSION}" == "" ]; then + VERSION="latest" +fi + +declare -A ARCH_MAP +ARCH_MAP=( ["i386"]="386" ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64") +if [ -z ${DEVEL+x} ]; then + declare -a ARCHITECTURES=(i386 armhf aarch64 amd64) +else + declare -a ARCHITECTURES=(amd64) + unset DOCKER_PASSWORD + unset DOCKER_USERNAME +fi + +REPOSITORY="${REPOSITORY:-netdata}" +echo "Building ${VERSION} of ${REPOSITORY} container" + +docker run --rm --privileged multiarch/qemu-user-static:register --reset + +# Build images using multi-arch Dockerfile. +for ARCH in "${ARCHITECTURES[@]}"; do + eval docker build \ + --build-arg ARCH="${ARCH}-v3.8" \ + --build-arg OUTPUT=/dev/null \ + --tag "${REPOSITORY}:${VERSION}-${ARCH}" \ + --file packaging/docker/Dockerfile ./ +done + +# There is no reason to continue if we cannot log in to docker hub +if [ -z ${DOCKER_USERNAME+x} ] || [ -z ${DOCKER_PASSWORD+x} ]; then + echo "No docker hub username or password specified. 
Exiting without pushing images to registry"
+    exit 0
+fi
+
+# Create temporary docker CLI config with experimental features enabled (manifests v2 need it)
+mkdir -p /tmp/docker
+echo '{"experimental":"enabled"}' > /tmp/docker/config.json
+
+# Log in to docker hub to allow further operations
+echo "$DOCKER_PASSWORD" | docker --config /tmp/docker login -u "$DOCKER_USERNAME" --password-stdin
+
+# Push images to registry
+for ARCH in amd64 i386 armhf aarch64; do
+    docker --config /tmp/docker push "${REPOSITORY}:${VERSION}-${ARCH}" &
+done
+wait
+
+# Recreate docker manifest
+docker --config /tmp/docker manifest create --amend \
+                                            "${REPOSITORY}:${VERSION}" \
+                                            "${REPOSITORY}:${VERSION}-i386" \
+                                            "${REPOSITORY}:${VERSION}-armhf" \
+                                            "${REPOSITORY}:${VERSION}-aarch64" \
+                                            "${REPOSITORY}:${VERSION}-amd64"
+
+# Annotate manifest with CPU architecture information
+for ARCH in i386 armhf aarch64 amd64; do
+    docker --config /tmp/docker manifest annotate "${REPOSITORY}:${VERSION}" "${REPOSITORY}:${VERSION}-${ARCH}" --os linux --arch "${ARCH_MAP[$ARCH]}"
+done
+
+# Push manifest to docker hub
+docker --config /tmp/docker manifest push -p "${REPOSITORY}:${VERSION}"
+
+# Show current manifest (debugging purpose only)
+docker --config /tmp/docker manifest inspect "${REPOSITORY}:${VERSION}"
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
new file mode 100644
index 000000000..243cae8a2
--- /dev/null
+++ b/packaging/docker/run.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+#set -e
+
+if [ ${RESCRAMBLE+x} ]; then
+    echo "Reinstalling all packages to get the latest Polymorphic Linux scramble"
+    apk upgrade --update-cache --available
+fi
+
+if [ ${PGID+x} ]; then
+    echo "Adding user netdata to group with id ${PGID}"
+    addgroup -g "${PGID}" -S hostgroup 2>/dev/null
+    sed -i "s/${PGID}:$/${PGID}:netdata/g" /etc/group
+fi
+
+exec /usr/sbin/netdata -u netdata -D -s /host -p "${NETDATA_PORT}" "$@"
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
new file mode 100644
index 000000000..602852efc
--- /dev/null
+++ b/packaging/go.d.checksums
@@ -0,0 +1,16 @@
+ef1d47b5e36d48c5cc99a837899d74625aec6c5e7a6d810254c56f9e58b9463f *config.tar.gz
+ac4df4040e4b1c1f55613e9c9ea0cea8100a36669ff976e30e90dbb7968337ff *go.d.plugin-v0.0.2.darwin-386
+f0a5938df322336a36c177972d3d328c4fbad927a0abc6edbc7159537a7da870 *go.d.plugin-v0.0.2.darwin-amd64
+aff5a560d2acc5717ee83cf5751062d704050e9a993968c093de284c313f0390 *go.d.plugin-v0.0.2.freebsd-386
+8a0abf3901b25fc37a7c65b931e3a7e4386b46c2b0c37d1f77c05f67eb68c1e9 *go.d.plugin-v0.0.2.freebsd-amd64
+b4a1715435983e60fefed1ca016fa55831ebfed419298cd93961d13a8ce8ee53 *go.d.plugin-v0.0.2.freebsd-arm
+e85e6bc0614d625d2c3f5d89182a640c3adabdb7ad9f2ad6a6d1d0fcef8d8761 *go.d.plugin-v0.0.2.linux-386
+f8b7d17402cfebb20431a2dedb9a7a1097a6be379a6eb187f9df4f39d69dc286 *go.d.plugin-v0.0.2.linux-amd64
+da9a1f5d083c09c644e5234ad73b523202fca5e5872645f8e3d33e3a01b11e71 *go.d.plugin-v0.0.2.linux-arm
+bc8d834840a723472ad116c7a44c5b93dd770356810912ca86cdcb517de076d8 *go.d.plugin-v0.0.2.linux-arm64
+556ec76fea17922ac413916f16061973ad20997cdce18be9ba6da22ddcc4d82d *go.d.plugin-v0.0.2.linux-mips
+9197b386863a48b9c00138fec885049448a5b85582db4e91668d5e21ef207b1a *go.d.plugin-v0.0.2.linux-mips64
+531795a69e5e6f2243a5ee19ed3221e7c6dfdcd690066af678fda505c8b3d81d *go.d.plugin-v0.0.2.linux-mips64le
+bb07367de065958ac429c694acb638c6151a80685081be74aafb65bd72a86022 *go.d.plugin-v0.0.2.linux-mipsle
+b78fa18407b3be1aa29657ea793fec5050c7fefc75ccc88168c7c2d4d4313def *go.d.plugin-v0.0.2.linux-ppc64
+50d37f290ebe0df8d08acff6c8f3f8df0c95d619ebe8b7f592973410b42a23ac *go.d.plugin-v0.0.2.linux-ppc64le
diff --git a/packaging/installer/.keep b/packaging/installer/.keep
new file mode 100644
index 000000000..e69de29bb
diff --git a/packaging/installer/README.md b/packaging/installer/README.md
new file mode 100644
index 000000000..eb507a5fc
--- /dev/null
+++ b/packaging/installer/README.md
@@ -0,0 +1,413 @@
+# Installation
+
+Netdata is a **monitoring agent**. It is designed to be installed and run on all your systems: **physical** and **virtual** servers, **containers**, even **IoT**.
+
+The best way to install Netdata is directly from source. Our **automatic installer** will install any required system packages and compile Netdata directly on your systems.
+
+!!! warning
+    You can find Netdata packages distributed by third parties. In many cases, these packages are either too old or broken. So, the suggested ways to install Netdata are the ones on this page.
+    **We are currently working to provide our binary packages for all Linux distros.** Stay tuned...
+
+1. [Automatic one line installation](#one-line-installation), easy installation from source, **this is the default**
+2. [Install pre-built static binary on any 64bit Linux](#linux-64bit-pre-built-static-binary)
+3. [Run Netdata in a docker container](#run-netdata-in-a-docker-container)
+4. [Manual installation, step by step](#install-netdata-on-linux-manually)
+5. [Install on FreeBSD](#freebsd)
+6. [Install on pfSense](#pfsense)
+7. [Enable on FreeNAS Corral](#freenas)
+8. [Install on macOS (OS X)](#macos)
+
+See also the list of Netdata [package maintainers](../maintainers) for ASUSTOR NAS, OpenWRT, ReadyNAS, etc.
+
+---
+
+## One line installation
+
+> This method is **fully automatic on all Linux** distributions. FreeBSD and MacOS systems need some preparation before installing Netdata for the first time. Check the [FreeBSD](#freebsd) and the [MacOS](#macos) sections for more information.
+
+To install Netdata from source and keep it up to date automatically, run the following:
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+*(do not `sudo` this command, it will do it by itself as needed)*
+
+![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0)
+
+More information and advanced use of this command:
+
+
+Verify the integrity of the script with this:
+
+```bash
+[ "b4632ca6c651de0f667e6d4f6e1015fe" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+*It should print `OK, VALID` if the script is the one we ship.*
+
+The `kickstart.sh` script:
+
+- detects the Linux distro and **installs the required system packages** for building Netdata (it will ask for confirmation)
+- downloads the latest Netdata source tree to `/usr/src/netdata.git`.
+- installs Netdata by running `./netdata-installer.sh` from the source tree.
+- installs `netdata-updater.sh` to `cron.daily`, so your Netdata installation will be updated daily (you will get a message from cron only if the update fails).
+- For QA purposes, this installation method lets us know whether it succeeded or failed.
+
+The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, so you can add more parameters to change the installation directory, enable/disable plugins, etc. (check below).
+
+For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata. Example:
+
+```bash
+  bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it
+```
+
+If you don't want to receive automatic updates, add `--no-updates` when executing the `kickstart.sh` script.
+
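+For example, a fully hands-off sketch invocation that combines the options above (installs without prompting, does not start netdata, and skips the daily auto-update):
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait --dont-start-it --no-updates
+```
+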
 
+ +Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md). + +--- + +## Linux 64bit pre-built static binary + +You can install a pre-compiled static binary of Netdata on any Intel/AMD 64bit Linux system +(even those that don't have a package manager, like CoreOS, CirrOS, busybox systems, etc). +You can also use these packages on systems with broken or unsupported package managers. + +To install Netdata with a binary package on any Linux distro, any kernel version - for **Intel/AMD 64bit** hosts, run the following: + +```bash + + bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) + +``` + +*(do not `sudo` this command, it will do it by itself as needed; if the target system does not have `bash` installed, see below for instructions to run it without `bash`)* + +![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart64&group=sum&after=-86400&label=today&units=installations&precision=0) + +> The static builds install Netdata at **`/opt/netdata`** + +
+More information and advanced use of this command:
+
+
+Verify the integrity of the script with this:
+
+```bash
+[ "ac8e5cf25399b08c42d37e1a53e1a6d3" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID"
+```
+
+*It should print `OK, VALID` if the script is the one we ship.*
+
+For automated installs, append a space + `--dont-wait` to the command line. You can also append `--dont-start-it` to prevent the installer from starting Netdata.
+
+Example:
+
+```bash
+
+  bash <(curl -Ss https://my-netdata.io/kickstart-static64.sh) --dont-wait --dont-start-it
+
+```
+
+If your shell fails to handle the above one-liner, do this:
+
+```bash
+# download the script with curl
+curl https://my-netdata.io/kickstart-static64.sh >/tmp/kickstart-static64.sh
+
+# or, download the script with wget
+wget -O /tmp/kickstart-static64.sh https://my-netdata.io/kickstart-static64.sh
+
+# run the downloaded script (any sh is fine, no need for bash)
+sh /tmp/kickstart-static64.sh
+```
+
+- The static binary files are kept in the [binary-packages](https://github.com/netdata/binary-packages) repo. You can download any of the `.run` files and run them. These files are self-extracting shell scripts built with [makeself](https://github.com/megastep/makeself).
+- The target system does **not** need to have bash installed.
+- The same files can be used for updates too.
+- For QA purposes, this installation method lets us know whether it succeeded or failed.
+
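+If you download the script to a file first (as above), you can also verify that local copy against the same md5 before executing it; a minimal sketch:
+
+```bash
+# compare the md5 of the downloaded file with the published one, then run it
+[ "ac8e5cf25399b08c42d37e1a53e1a6d3" = "$(md5sum </tmp/kickstart-static64.sh | cut -d ' ' -f 1)" ] \
+  && sh /tmp/kickstart-static64.sh \
+  || echo "FAILED, INVALID - do not run"
+```
+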
 
+
+Once Netdata is installed, see [Getting Started](../../docs/GettingStarted.md).
+
+---
+
+## Run Netdata in a Docker container
+
+You can [Install Netdata with Docker](../docker/#install-netdata-with-docker).
+
+---
+
+## Install Netdata on Linux manually
+
+To install the latest git version of Netdata, please follow these 2 steps:
+
+1. [Prepare your system](#prepare-your-system)
+
+   Install the required packages on your system.
+
+2. [Install Netdata](#install-netdata)
+
+   Download and install Netdata. You can also update it the same way.
+
+---
+
+### Prepare your system
+
+Try our experimental automatic requirements installer (no need to be root). This will try to find the packages that should be installed on your system to build and run Netdata. It supports most major Linux distributions released after 2010:
+
+- **Alpine** Linux and its derivatives (you have to install `bash` yourself, before using the installer)
+- **Arch** Linux and its derivatives
+- **Gentoo** Linux and its derivatives
+- **Debian** Linux and its derivatives (including **Ubuntu**, **Mint**)
+- **Fedora** and its derivatives (including **Red Hat Enterprise Linux**, **CentOS**, **Amazon Machine Image**)
+- **SuSe** Linux and its derivatives (including **openSuSe**)
+- **SLE12** Must have your system registered with Suse Customer Center or have the DVD. See [#1162](https://github.com/netdata/netdata/issues/1162)
+
+Install the packages for a **basic Netdata installation** (system monitoring and many applications, without `mysql` / `mariadb`, `postgres`, `named`, hardware sensors and `SNMP`):
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh' >/tmp/kickstart.sh && bash /tmp/kickstart.sh -i netdata
+```
+
+Install all the required packages for **monitoring everything Netdata can monitor**:
+
+```sh
+curl -Ss 'https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh' >/tmp/kickstart.sh && bash /tmp/kickstart.sh -i netdata-all
+```
+
+If the above does not work for you, please [open a github issue](https://github.com/netdata/netdata/issues/new?title=packages%20installer%20failed&labels=installation%20help&body=The%20experimental%20packages%20installer%20failed.%0A%0AThis%20is%20what%20it%20says:%0A%0A%60%60%60txt%0A%0Aplease%20paste%20your%20screen%20here%0A%0A%60%60%60) with a copy of the message you get on screen. We are trying to make it work everywhere (this is also why the script [reports back](https://github.com/netdata/netdata/issues/2054) success or failure for all its runs).
+
+---
+
+This is how to do it by hand:
+
+```sh
+# Debian / Ubuntu
+apt-get install zlib1g-dev uuid-dev libmnl-dev gcc make git autoconf autoconf-archive autogen automake pkg-config curl
+
+# Fedora
+dnf install zlib-devel libuuid-devel libmnl-devel gcc make git autoconf autoconf-archive autogen automake pkgconfig curl findutils
+
+# CentOS / Red Hat Enterprise Linux
+yum install autoconf automake curl gcc git libmnl-devel libuuid-devel lm_sensors make MySQL-python nc pkgconfig python python-psycopg2 PyYAML zlib-devel
+
+```
+
+Please note that for RHEL/CentOS you might need [EPEL](http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/), as shown in the sketch below.
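+On CentOS, for example, EPEL can usually be enabled with a single package before running the `yum install` line above (a sketch; on RHEL you have to install the `epel-release` RPM matching your specific release instead):
+
+```sh
+# CentOS: enable the EPEL repository, then install the packages listed above
+yum install epel-release
+```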
+
+Once Netdata is compiled, the following packages are required to run it (they are already installed by the above commands):
+
+package|description
+:-----:|-----------
+`libuuid`|part of `util-linux` for GUIDs management
+`zlib`|gzip compression for the internal Netdata web server
+
+*Netdata will fail to start without the above.*
+
+Netdata plugins and various aspects of Netdata can be enabled, or will benefit, when the following optional packages are installed:
+
+package|description
+:-----:|-----------
+`bash`|for shell plugins and **alarm notifications**
+`curl`|for shell plugins and **alarm notifications**
+`iproute` or `iproute2`|for monitoring **Linux traffic QoS**<br/>use `iproute2` if `iproute` reports as not available or obsolete
+`python`|for most of the external plugins
+`python-yaml`|used for monitoring **beanstalkd**
+`python-beanstalkc`|used for monitoring **beanstalkd**
+`python-dnspython`|used for monitoring DNS query time
+`python-ipaddress`|used for monitoring **DHCPd**<br/>this package is required only if the system has python v2. python v3 has this functionality embedded
+`python-mysqldb`<br/>or<br/>`python-pymysql`|used for monitoring **mysql** or **mariadb** databases<br/>`python-mysqldb` is a lot faster and thus preferred
+`python-psycopg2`|used for monitoring **postgresql** databases
+`python-pymongo`|used for monitoring **mongodb** databases
+`nodejs`|used for `node.js` plugins for monitoring **named** and **SNMP** devices
+`lm-sensors`|for monitoring **hardware sensors**
+`libmnl`|for collecting netfilter metrics
+`netcat`|for shell plugins to collect metrics from remote systems
+
+*Netdata will greatly benefit if you have the above packages installed, but it will still work without them.*
+
+---
+
+### Install Netdata
+
+Do this to install and run Netdata:
+
+```sh
+
+# download it - the directory 'netdata' will be created
+git clone https://github.com/netdata/netdata.git --depth=100
+cd netdata
+
+# run script with root privileges to build, install, start Netdata
+./netdata-installer.sh
+
+```
+
+* If you don't want to run it straight away, add the `--dont-start-it` option.
+
+* If you don't want to install it in the default directories, you can run the installer like this: `./netdata-installer.sh --install /opt`. This one will install Netdata in `/opt/netdata`.
+
+Once the installer completes, the file `/etc/netdata/netdata.conf` will be created (if you changed the installation directory, the configuration will appear in that directory too).
+
+You can edit this file to set options. One common option to tweak is `history`, which controls the size of the memory database Netdata will use. By default it is `3600` seconds (an hour of data on the charts), which makes Netdata use about 10-15MB of RAM (depending on the number of charts detected on your system). Check **[[Memory Requirements]]**.
+
+To apply the changes you made, you have to restart Netdata.
+
+---
+
+## Other Systems
+
+
+
+##### FreeBSD
+
+You can install Netdata from the ports or packages collection.
+
+This is how to install the latest Netdata version from sources on FreeBSD:
+
+```sh
+# install required packages
+pkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof
+
+# download Netdata
+git clone https://github.com/netdata/netdata.git --depth=100
+
+# install Netdata in /opt/netdata
+cd netdata
+./netdata-installer.sh --install /opt
+```
+
+##### pfSense
+To install Netdata on pfSense, run the following commands (within a shell or under Diagnostics/Command Prompt within the pfSense web interface).
+
+Change the platform (i386/amd64, etc) and FreeBSD versions (10/11, etc) according to your environment, and change the Netdata version (1.11.0 in the example) according to the latest version present within the FreeBSD repository:
+
+Note: the first three packages are downloaded from the pfSense repository to maintain compatibility with pfSense; Netdata is downloaded from the FreeBSD repository.
+```
+pkg install pkgconf
+pkg install bash
+pkg install e2fsprogs-libuuid
+pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.11.0.txz
+```
+To start Netdata manually, run `service netdata onestart`
+
+To start Netdata automatically at each boot, add `service netdata start` as a Shellcmd within the pfSense web interface (under **Services/Shellcmd**, which you need to install beforehand under **System/Package Manager/Available Packages**).
+Shellcmd Type should be set to `Shellcmd`.
+![](https://user-images.githubusercontent.com/36808164/36930790-4db3aa84-1f0d-11e8-8752-cdc08bb7207c.png)
+Alternatively, more information on achieving the same via the command line and scripts can be found at https://doc.pfsense.org/index.php/Installing_FreeBSD_Packages.
+
+If `/usr/bin/install` is absent on pfSense 2.3 or earlier, update pfSense or use the workaround from [https://redmine.pfsense.org/issues/6643](https://redmine.pfsense.org/issues/6643)
+
+##### FreeNAS
+On FreeNAS-Corral-RELEASE (>=10.0.3), Netdata is pre-installed.
+
+To use Netdata, the service will need to be enabled and started from the FreeNAS **[CLI](https://github.com/freenas/cli)**.
+
+To enable the Netdata service:
+```
+service netdata config set enable=true
+```
+
+To start the netdata service:
+```
+service netdata start
+```
+
+##### macOS
+
+Netdata on macOS still has limited charts, but external plugins do work.
+
+You can either install Netdata with [Homebrew](https://brew.sh/)
+
+```sh
+brew install netdata
+```
+
+or from source:
+
+```sh
+# install Xcode Command Line Tools
+xcode-select --install
+```
+click `Install` in the software update popup window, then
+```sh
+# install HomeBrew package manager
+/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+
+# install required packages
+brew install ossp-uuid autoconf automake pkg-config
+
+# download Netdata
+git clone https://github.com/netdata/netdata.git --depth=100
+
+# install Netdata in /usr/local/netdata
+cd netdata
+sudo ./netdata-installer.sh --install /usr/local
+```
+
+The installer will also install a startup plist to start Netdata when your Mac boots.
+
+##### Alpine 3.x
+
+Execute these commands to install Netdata in Alpine Linux 3.x:
+
+```
+# install required packages
+apk add alpine-sdk bash curl zlib-dev util-linux-dev libmnl-dev gcc make git autoconf automake pkgconfig python logrotate
+
+# if you plan to run node.js Netdata plugins
+apk add nodejs
+
+# download Netdata - the directory 'netdata' will be created
+git clone https://github.com/netdata/netdata.git --depth=100
+cd netdata
+
+
+# build it, install it, start it
+./netdata-installer.sh
+
+
+# make Netdata start at boot
+echo -e "#!/usr/bin/env bash\n/usr/sbin/netdata" >/etc/local.d/netdata.start
+chmod 755 /etc/local.d/netdata.start
+
+# make Netdata stop at shutdown
+echo -e "#!/usr/bin/env bash\nkillall netdata" >/etc/local.d/netdata.stop
+chmod 755 /etc/local.d/netdata.stop
+
+# enable the local service to start automatically
+rc-update add local
+```
+
+##### Synology
+
+The documentation previously recommended installing the Debian Chroot package from the Synology community package sources and then running Netdata from within the chroot. This does not work, as the chroot environment does not have access to `/proc`, and therefore exposes very few metrics to Netdata. Additionally, [this issue](https://github.com/SynoCommunity/spksrc/issues/2758), still open as of 2018/06/24, indicates that the Debian Chroot package is not suitable for DSM versions greater than version 5 and may corrupt system libraries and render the NAS unable to boot.
+
+The good news is that the 64-bit static installer works fine if your NAS is one that uses the amd64 architecture. It will install the content into `/opt/netdata`, making future removal safe and simple.
+
+When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other installations run it as the _netdata_ user, you might wish to do the same. This requires some extra work:
+
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
+2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password.
Assign the user to the `netdata` group. Netdata will chuid to this user when running.
+3. Change ownership of the following directories, as defined in [Netdata Security](../../docs/netdata-security.md#security-design):
+
+```
+$ chown -R root:netdata /opt/netdata/usr/share/netdata
+$ chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/netdata
+$ chown -R netdata:root /opt/netdata/var/log/netdata
+```
+
+Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is installed. You'll have to do this manually:
+
+1. Add [this file](https://gist.github.com/oskapt/055d474d7bfef32c49469c1b53e8225f) as `/etc/rc.netdata`. Make it executable with `chmod 0755 /etc/rc.netdata`.
+2. Edit `/etc/rc.local` and add a line calling `/etc/rc.netdata` to have it start on boot:
+
+```
+# Netdata startup
+[ -x /etc/rc.netdata ] && /etc/rc.netdata start
+```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/installer/UNINSTALL.md b/packaging/installer/UNINSTALL.md
new file mode 100644
index 000000000..511693b48
--- /dev/null
+++ b/packaging/installer/UNINSTALL.md
@@ -0,0 +1,22 @@
+# Uninstalling netdata
+
+Our self-contained uninstaller is able to remove netdata installations created with the shell installer. It doesn't need any other netdata repository files to run. All it needs is an `.environment` file, which is created during installation (by the shell installer) and placed in `${NETDATA_USER_CONFIG_DIR}/.environment` (by default `/etc/netdata/.environment`). That file contains some parameters which were passed to our installer and which are needed during the uninstallation process. Mainly, two parameters are needed:
+```
+NETDATA_PREFIX
+NETDATA_ADDED_TO_GROUPS
+```
+
+A workflow for uninstallation looks like this:
+
+1. Find your `.environment` file
+2. If you cannot find that file and would like to uninstall netdata, then create a new file with the following content:
+```
+NETDATA_PREFIX="" # put what you used as a parameter to the shell installer's `--install` flag. Otherwise it should be empty
+NETDATA_ADDED_TO_GROUPS="" # Additional groups for the user running the netdata process
+```
+3. Download [netdata-uninstaller.sh](https://github.com/netdata/netdata/blob/master/packaging/installer/netdata-uninstaller.sh) and run it as follows: `netdata-uninstaller.sh --yes --env <path_to_environment_file>`. The default `path_to_environment_file` is `/etc/netdata`; it's the location of the `.environment` file that is used by the uninstaller. See the sketch below.
+
+
+Note: This uninstallation method assumes a previous installation with netdata-installer.sh or the kickstart script. Using it when netdata was installed by a package manager may work, but can also cause unexpected results.
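+For example, a typical uninstall session for a default installation might look like the following sketch (the raw.githubusercontent.com URL is assumed from the repository path linked above):
+
+```sh
+# download the uninstaller script
+curl -sSL -o /tmp/netdata-uninstaller.sh \
+  https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/netdata-uninstaller.sh
+
+# run it, pointing --env at the location of the .environment file
+sh /tmp/netdata-uninstaller.sh --yes --env /etc/netdata
+```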
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUNINSTALL&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/installer/UPDATE.md b/packaging/installer/UPDATE.md
new file mode 100644
index 000000000..7c0be8323
--- /dev/null
+++ b/packaging/installer/UPDATE.md
@@ -0,0 +1,55 @@
+# Updating netdata after its installation
+
+![image8](https://cloud.githubusercontent.com/assets/2662304/14253735/536f4580-fa95-11e5-9f7b-99112b31a5d7.gif)
+
+
+We suggest keeping your netdata updated. We are actively developing it, and you should always update to the latest version.
+
+The update procedure depends on how you installed it:
+
+## You downloaded it from github using git
+
+### Manual update to get the latest git commit
+
+netdata versions older than `v1.12.0-rc2-52` had a `netdata-updater.sh` script in the root directory of the source code, which has now been deprecated. The manual process that works for all versions to get the latest commit from git is to use `netdata-installer.sh`. The installer preserves your custom configuration and updates the installation information in the `.environment` file under the user configuration directory.
+
+```sh
+# go to the git downloaded directory
+cd /path/to/git/downloaded/netdata
+
+# update your local copy
+git pull
+
+# run the netdata installer
+sudo ./netdata-installer.sh
+```
+
+_Netdata will be restarted with the new version._
+
+Keep in mind, netdata may now have new features, or certain old features may now behave differently. So pay some attention to it after updating.
+
+### Manual update to get the latest nightly build
+
+The `kickstart.sh` one-liner will do a one-time update to the latest nightly build, if executed as follows:
+```
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --no-updates
+```
+
+### Auto-update
+
+_Please, consider the risks of running an auto-update. Something can always go wrong. Keep an eye on your installation, and run a manual update if something ever fails._
+
+Calling the `netdata-installer.sh` with the `--auto-update` or `-u` option will create the `netdata-updater` script under
+either `/etc/cron.daily/`, or `/etc/periodic/daily/`. Whenever the `netdata-updater` is executed, it checks if a newer nightly build is available and then handles the download, installation and netdata restart. See the sketch at the end of this page.
+
+Note that after Jan 2019, the `kickstart.sh` one-liner `bash <(curl -Ss https://my-netdata.io/kickstart.sh)` calls the `netdata-installer.sh` with the auto-update option. So if you just run the one-liner without options once, your netdata will be kept auto-updated.
+
+
+## You downloaded a binary package
+
+If you installed it from a binary package, the best way is to **obtain a newer copy** from the source you got it from in the first place.
+
+If a newer version of netdata is not available from the source you got it from, we suggest uninstalling the version you have and following the **[[Installation]]** instructions for installing a fresh version of netdata.
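+For example, to enable auto-updates on an existing source installation, re-run the installer with the auto-update option described above (a sketch; the directory is the one you originally cloned netdata into):
+
+```sh
+# from the original source directory
+cd /path/to/git/downloaded/netdata
+
+# re-run the installer with auto-updates enabled
+sudo ./netdata-installer.sh --auto-update
+```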
+ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Finstaller%2FUPDATE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh new file mode 100644 index 000000000..a2d7365ea --- /dev/null +++ b/packaging/installer/functions.sh @@ -0,0 +1,797 @@ +# no shebang necessary - this is a library to be sourced +# SPDX-License-Identifier: GPL-3.0-or-later +# shellcheck disable=SC1091,SC1117,SC2002,SC2004,SC2034,SC2046,SC2059,SC2086,SC2129,SC2148,SC2154,SC2155,SC2162,SC2166,SC2181,SC2193 + +# make sure we have a UID +[ -z "${UID}" ] && UID="$(id -u)" + +# ----------------------------------------------------------------------------- +# checking the availability of commands + +which_cmd() { + # shellcheck disable=SC2230 + which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null +} + +check_cmd() { + which_cmd "${1}" >/dev/null 2>&1 && return 0 + return 1 +} + +# ----------------------------------------------------------------------------- + +setup_terminal() { + TPUT_RESET="" + TPUT_BLACK="" + TPUT_RED="" + TPUT_GREEN="" + TPUT_YELLOW="" + TPUT_BLUE="" + TPUT_PURPLE="" + TPUT_CYAN="" + TPUT_WHITE="" + TPUT_BGBLACK="" + TPUT_BGRED="" + TPUT_BGGREEN="" + TPUT_BGYELLOW="" + TPUT_BGBLUE="" + TPUT_BGPURPLE="" + TPUT_BGCYAN="" + TPUT_BGWHITE="" + TPUT_BOLD="" + TPUT_DIM="" + TPUT_UNDERLINED="" + TPUT_BLINK="" + TPUT_INVERTED="" + TPUT_STANDOUT="" + TPUT_BELL="" + TPUT_CLEAR="" + + # Is stderr on the terminal? If not, then fail + test -t 2 || return 1 + + if check_cmd tput; then + if [ $(($(tput colors 2>/dev/null))) -ge 8 ]; then + # Enable colors + TPUT_RESET="$(tput sgr 0)" + TPUT_BLACK="$(tput setaf 0)" + TPUT_RED="$(tput setaf 1)" + TPUT_GREEN="$(tput setaf 2)" + TPUT_YELLOW="$(tput setaf 3)" + TPUT_BLUE="$(tput setaf 4)" + TPUT_PURPLE="$(tput setaf 5)" + TPUT_CYAN="$(tput setaf 6)" + TPUT_WHITE="$(tput setaf 7)" + TPUT_BGBLACK="$(tput setab 0)" + TPUT_BGRED="$(tput setab 1)" + TPUT_BGGREEN="$(tput setab 2)" + TPUT_BGYELLOW="$(tput setab 3)" + TPUT_BGBLUE="$(tput setab 4)" + TPUT_BGPURPLE="$(tput setab 5)" + TPUT_BGCYAN="$(tput setab 6)" + TPUT_BGWHITE="$(tput setab 7)" + TPUT_BOLD="$(tput bold)" + TPUT_DIM="$(tput dim)" + TPUT_UNDERLINED="$(tput smul)" + TPUT_BLINK="$(tput blink)" + TPUT_INVERTED="$(tput rev)" + TPUT_STANDOUT="$(tput smso)" + TPUT_BELL="$(tput bel)" + TPUT_CLEAR="$(tput clear)" + fi + fi + + return 0 +} +setup_terminal || echo >/dev/null + +progress() { + echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +# ----------------------------------------------------------------------------- + +netdata_banner() { + local l1=" ^" \ + l2=" |.-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-. .-" \ + l3=" | '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' '-' " \ + l4=" +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+--->" \ + sp=" " \ + netdata="netdata" start end msg="${*}" chartcolor="${TPUT_DIM}" + + [ ${#msg} -lt ${#netdata} ] && msg="${msg}${sp:0:$((${#netdata} - ${#msg}))}" + [ ${#msg} -gt $((${#l2} - 20)) ] && msg="${msg:0:$((${#l2} - 23))}..." 
+ + start="$((${#l2} / 2 - 4))" + [ $((start + ${#msg} + 4)) -gt ${#l2} ] && start=$((${#l2} - ${#msg} - 4)) + end=$((start + ${#msg} + 4)) + + echo >&2 + echo >&2 "${chartcolor}${l1}${TPUT_RESET}" + echo >&2 "${chartcolor}${l2:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_GREEN}${netdata}${TPUT_RESET}${chartcolor}${sp:0:$((end - start - 2 - ${#netdata}))}${l2:end:$((${#l2} - end))}${TPUT_RESET}" + echo >&2 "${chartcolor}${l3:0:start}${sp:0:2}${TPUT_RESET}${TPUT_BOLD}${TPUT_CYAN}${msg}${TPUT_RESET}${chartcolor}${sp:0:2}${l3:end:$((${#l2} - end))}${TPUT_RESET}" + echo >&2 "${chartcolor}${l4}${TPUT_RESET}" + echo >&2 +} + +# ----------------------------------------------------------------------------- +# portable service command + +service_cmd="$(which_cmd service)" +rcservice_cmd="$(which_cmd rc-service)" +systemctl_cmd="$(which_cmd systemctl)" +service() { + local cmd="${1}" action="${2}" + + if [ ! -z "${systemctl_cmd}" ]; then + run "${systemctl_cmd}" "${action}" "${cmd}" + return $? + elif [ ! -z "${service_cmd}" ]; then + run "${service_cmd}" "${cmd}" "${action}" + return $? + elif [ ! -z "${rcservice_cmd}" ]; then + run "${rcservice_cmd}" "${cmd}" "${action}" + return $? + fi + return 1 +} + +# ----------------------------------------------------------------------------- +# portable pidof + +pidof_cmd="$(which_cmd pidof)" +pidof() { + if [ ! -z "${pidof_cmd}" ]; then + ${pidof_cmd} "${@}" + return $? + else + ps -acxo pid,comm | + sed "s/^ *//g" | + grep netdata | + cut -d ' ' -f 1 + return $? + fi +} + +# ----------------------------------------------------------------------------- +# portable delete recursively interactively + +portable_deletedir_recursively_interactively() { + if [ ! -z "$1" -a -d "$1" ]; then + if [ "$(uname -s)" = "Darwin" ]; then + echo >&2 + read >&2 -p "Press ENTER to recursively delete directory '$1' > " + echo >&2 "Deleting directory '$1' ..." + run rm -R "$1" + else + echo >&2 + echo >&2 "Deleting directory '$1' ..." + run rm -I -R "$1" + fi + else + echo "Directory '$1' does not exist." + fi +} + +# ----------------------------------------------------------------------------- + +export SYSTEM_CPUS=1 +portable_find_processors() { + if [ -f "/proc/cpuinfo" ]; then + # linux + SYSTEM_CPUS=$(grep -c ^processor /proc/cpuinfo) + else + # freebsd + SYSTEM_CPUS=$(sysctl hw.ncpu 2>/dev/null | grep ^hw.ncpu | cut -d ' ' -f 2) + fi + [ -z "${SYSTEM_CPUS}" -o $((SYSTEM_CPUS)) -lt 1 ] && SYSTEM_CPUS=1 +} +portable_find_processors + +# ----------------------------------------------------------------------------- + +run_ok() { + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" +} + +run_failed() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" +} + +ESCAPED_PRINT_METHOD= +printf "%q " test >/dev/null 2>&1 +[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq" +escaped_print() { + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then + printf "%q " "${@}" + else + printf "%s" "${*}" + fi + return 0 +} + +run_logfile="/dev/null" +run() { + local user="${USER--}" dir="${PWD}" info info_console + + if [ "${UID}" = "0" ]; then + info="[root ${dir}]# " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " + else + info="[${user} ${dir}]$ " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " + fi + + printf >>"${run_logfile}" "${info}" + escaped_print >>"${run_logfile}" "${@}" + printf >>"${run_logfile}" " ... 
" + + printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" + escaped_print >&2 "${@}" + printf >&2 "${TPUT_RESET}\n" + + "${@}" + + local ret=$? + if [ ${ret} -ne 0 ]; then + run_failed + printf >>"${run_logfile}" "FAILED with exit code ${ret}\n" + else + run_ok + printf >>"${run_logfile}" "OK\n" + fi + + return ${ret} +} + +getent_cmd="$(which_cmd getent)" +portable_check_user_exists() { + local username="${1}" found= + + if [ ! -z "${getent_cmd}" ]; then + "${getent_cmd}" passwd "${username}" >/dev/null 2>&1 + return $? + fi + + found="$(cut -d ':' -f 1 /dev/null 2>&1 + return $? + fi + + found="$(cut -d ':' -f 1 &2 "User '${username}' already exists." && return 0 + + echo >&2 "Adding ${username} user account with home ${homedir} ..." + + # shellcheck disable=SC2230 + local nologin="$(which nologin 2>/dev/null || command -v nologin 2>/dev/null || echo '/bin/false')" + + # Linux + if check_cmd useradd; then + run useradd -r -g "${username}" -c "${username}" -s "${nologin}" --no-create-home -d "${homedir}" "${username}" && return 0 + fi + + # FreeBSD + if check_cmd pw; then + run pw useradd "${username}" -d "${homedir}" -g "${username}" -s "${nologin}" && return 0 + fi + + # BusyBox + if check_cmd adduser; then + run adduser -h "${homedir}" -s "${nologin}" -D -G "${username}" "${username}" && return 0 + fi + + echo >&2 "Failed to add ${username} user account !" + + return 1 +} + +portable_add_group() { + local groupname="${1}" + + portable_check_group_exists "${groupname}" + [ $? -eq 0 ] && echo >&2 "Group '${groupname}' already exists." && return 0 + + echo >&2 "Adding ${groupname} user group ..." + + # Linux + if check_cmd groupadd; then + run groupadd -r "${groupname}" && return 0 + fi + + # FreeBSD + if check_cmd pw; then + run pw groupadd "${groupname}" && return 0 + fi + + # BusyBox + if check_cmd addgroup; then + run addgroup "${groupname}" && return 0 + fi + + echo >&2 "Failed to add ${groupname} user group !" + return 1 +} + +portable_add_user_to_group() { + local groupname="${1}" username="${2}" + + portable_check_group_exists "${groupname}" + [ $? -ne 0 ] && echo >&2 "Group '${groupname}' does not exist." && return 1 + + # find the user is already in the group + if portable_check_user_in_group "${username}" "${groupname}"; then + # username is already there + echo >&2 "User '${username}' is already in group '${groupname}'." + return 0 + else + # username is not in group + echo >&2 "Adding ${username} user to the ${groupname} group ..." + + # Linux + if check_cmd usermod; then + run usermod -a -G "${groupname}" "${username}" && return 0 + fi + + # FreeBSD + if check_cmd pw; then + run pw groupmod "${groupname}" -m "${username}" && return 0 + fi + + # BusyBox + if check_cmd addgroup; then + run addgroup "${username}" "${groupname}" && return 0 + fi + + echo >&2 "Failed to add user ${username} to group ${groupname} !" + return 1 + fi +} + +iscontainer() { + # man systemd-detect-virt + local cmd=$(which_cmd systemd-detect-virt) + if [ ! -z "${cmd}" -a -x "${cmd}" ]; then + "${cmd}" --container >/dev/null 2>&1 && return 0 + fi + + # /proc/1/sched exposes the host's pid of our init ! + # http://stackoverflow.com/a/37016302 + local pid=$(cat /proc/1/sched 2>/dev/null | head -n 1 | { + IFS='(),#:' read name pid th threads + echo $pid + }) + if [ ! -z "${pid}" ]; then + pid=$(( pid + 0 )) + [ ${pid} -gt 1 ] && return 0 + fi + + # lxc sets environment variable 'container' + [ ! 
-z "${container}" ] && return 0 + + # docker creates /.dockerenv + # http://stackoverflow.com/a/25518345 + [ -f "/.dockerenv" ] && return 0 + + # ubuntu and debian supply /bin/running-in-container + # https://www.apt-browse.org/browse/ubuntu/trusty/main/i386/upstart/1.12.1-0ubuntu4/file/bin/running-in-container + if [ -x "/bin/running-in-container" ]; then + "/bin/running-in-container" >/dev/null 2>&1 && return 0 + fi + + return 1 +} + +issystemd() { + local pids p myns ns systemctl + + # if the directory /lib/systemd/system OR /usr/lib/systemd/system (SLES 12.x) does not exit, it is not systemd + [ ! -d /lib/systemd/system -a ! -d /usr/lib/systemd/system ] && return 1 + + # if there is no systemctl command, it is not systemd + # shellcheck disable=SC2230 + systemctl=$(which systemctl 2>/dev/null || command -v systemctl 2>/dev/null) + [ -z "${systemctl}" -o ! -x "${systemctl}" ] && return 1 + + # if pid 1 is systemd, it is systemd + [ "$(basename $(readlink /proc/1/exe) 2>/dev/null)" = "systemd" ] && return 0 + + # if systemd is not running, it is not systemd + pids=$(pidof systemd 2>/dev/null) + [ -z "${pids}" ] && return 1 + + # check if the running systemd processes are not in our namespace + myns="$(readlink /proc/self/ns/pid 2>/dev/null)" + for p in ${pids}; do + ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)" + + # if pid of systemd is in our namespace, it is systemd + [ ! -z "${myns}" ] && [ "${myns}" = "${ns}" ] && return 0 + done + + # else, it is not systemd + return 1 +} + +install_non_systemd_init() { + [ "${UID}" != 0 ] && return 1 + + local key="unknown" + if [ -f /etc/os-release ]; then + source /etc/os-release || return 1 + key="${ID}-${VERSION_ID}" + + elif [ -f /etc/redhat-release ]; then + key=$(&2 "Installing OpenRC init file..." + run cp system/netdata-openrc /etc/init.d/netdata && + run chmod 755 /etc/init.d/netdata && + run rc-update add netdata default && + return 0 + + elif [ "${key}" = "debian-7" \ + -o "${key}" = "ubuntu-12.04" \ + -o "${key}" = "ubuntu-14.04" \ + ]; then + echo >&2 "Installing LSB init file..." + run cp system/netdata-lsb /etc/init.d/netdata && + run chmod 755 /etc/init.d/netdata && + run update-rc.d netdata defaults && + run update-rc.d netdata enable && + return 0 + elif [[ ${key} =~ ^(amzn-201[5678]|ol|CentOS release 6|Red Hat Enterprise Linux Server release 6|Scientific Linux CERN SLC release 6|CloudLinux Server release 6).* ]]; then + echo >&2 "Installing init.d file..." + run cp system/netdata-init-d /etc/init.d/netdata && + run chmod 755 /etc/init.d/netdata && + run chkconfig netdata on && + return 0 + else + echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it." + return 1 + fi + elif [ -f /etc/init.d/netdata ]; then + echo >&2 "file '/etc/init.d/netdata' already exists." + return 0 + else + echo >&2 "I don't know what init file to install on system '${key}'. Open a github issue to help us fix it." + fi + + return 1 +} + +NETDATA_START_CMD="netdata" +NETDATA_STOP_CMD="killall netdata" + +install_netdata_service() { + local uname="$(uname 2>/dev/null)" + + if [ "${UID}" -eq 0 ]; then + if [ "${uname}" = "Darwin" ]; then + + if [ -f "/Library/LaunchDaemons/com.github.netdata.plist" ]; then + echo >&2 "file '/Library/LaunchDaemons/com.github.netdata.plist' already exists." + return 0 + else + echo >&2 "Installing MacOS X plist file..." 
+ run cp system/netdata.plist /Library/LaunchDaemons/com.github.netdata.plist && + run launchctl load /Library/LaunchDaemons/com.github.netdata.plist && + return 0 + fi + + elif [ "${uname}" = "FreeBSD" ]; then + + run cp system/netdata-freebsd /etc/rc.d/netdata && + NETDATA_START_CMD="service netdata start" && + NETDATA_STOP_CMD="service netdata stop" && + return 0 + + elif issystemd; then + # systemd is running on this system + NETDATA_START_CMD="systemctl start netdata" + NETDATA_STOP_CMD="systemctl stop netdata" + + SYSTEMD_DIRECTORY="" + + if [ -d "/lib/systemd/system" ]; then + SYSTEMD_DIRECTORY="/lib/systemd/system" + elif [ -d "/usr/lib/systemd/system" ]; then + SYSTEMD_DIRECTORY="/usr/lib/systemd/system" + fi + + if [ "${SYSTEMD_DIRECTORY}x" != "x" ]; then + echo >&2 "Installing systemd service..." + run cp system/netdata.service "${SYSTEMD_DIRECTORY}/netdata.service" && + run systemctl daemon-reload && + run systemctl enable netdata && + return 0 + else + echo >&2 "no systemd directory; cannot install netdata.service" + fi + else + install_non_systemd_init + local ret=$? + + if [ ${ret} -eq 0 ]; then + if [ ! -z "${service_cmd}" ]; then + NETDATA_START_CMD="service netdata start" + NETDATA_STOP_CMD="service netdata stop" + elif [ ! -z "${rcservice_cmd}" ]; then + NETDATA_START_CMD="rc-service netdata start" + NETDATA_STOP_CMD="rc-service netdata stop" + fi + fi + + return ${ret} + fi + fi + + return 1 +} + +# ----------------------------------------------------------------------------- +# stop netdata + +pidisnetdata() { + if [ -d /proc/self ]; then + [ -z "$1" -o ! -f "/proc/$1/stat" ] && return 1 + [ "$(cat "/proc/$1/stat" | cut -d '(' -f 2 | cut -d ')' -f 1)" = "netdata" ] && return 0 + return 1 + fi + return 0 +} + +stop_netdata_on_pid() { + local pid="${1}" ret=0 count=0 + + pidisnetdata ${pid} || return 0 + + printf >&2 "Stopping netdata on pid ${pid} ..." + while [ ! -z "$pid" -a ${ret} -eq 0 ]; do + if [ ${count} -gt 45 ]; then + echo >&2 "Cannot stop the running netdata on pid ${pid}." + return 1 + fi + + count=$((count + 1)) + + run kill ${pid} 2>/dev/null + ret=$? + + test ${ret} -eq 0 && printf >&2 "." && sleep 2 + done + + echo >&2 + if [ ${ret} -eq 0 ]; then + echo >&2 "SORRY! CANNOT STOP netdata ON PID ${pid} !" + return 1 + fi + + echo >&2 "netdata on pid ${pid} stopped." + return 0 +} + +netdata_pids() { + local p myns ns + + myns="$(readlink /proc/self/ns/pid 2>/dev/null)" + + # echo >&2 "Stopping a (possibly) running netdata (namespace '${myns}')..." + + for p in \ + $(cat /var/run/netdata.pid 2>/dev/null) \ + $(cat /var/run/netdata/netdata.pid 2>/dev/null) \ + $(pidof netdata 2>/dev/null); do + ns="$(readlink /proc/${p}/ns/pid 2>/dev/null)" + + if [ -z "${myns}" -o -z "${ns}" -o "${myns}" = "${ns}" ]; then + pidisnetdata ${p} && echo "${p}" + fi + done +} + +stop_all_netdata() { + local p + for p in $(netdata_pids); do + stop_netdata_on_pid ${p} + done +} + +# ----------------------------------------------------------------------------- +# restart netdata + +restart_netdata() { + local netdata="${1}" + shift + + local started=0 + + progress "Start netdata" + + if [ "${UID}" -eq 0 ]; then + service netdata stop + stop_all_netdata + service netdata restart && started=1 + + if [ ${started} -eq 1 -a -z "$(netdata_pids)" ]; then + echo >&2 "Ooops! it seems netdata is not started." + started=0 + fi + + if [ ${started} -eq 0 ]; then + service netdata start && started=1 + fi + fi + + if [ ${started} -eq 1 -a -z "$(netdata_pids)" ]; then + echo >&2 "Hm... 
it seems netdata is still not started."
+		started=0
+	fi
+
+	if [ ${started} -eq 0 ]; then
+		# still not started...
+
+		run stop_all_netdata
+		run "${netdata}" "${@}"
+		return $?
+	fi
+
+	return 0
+}
+
+# -----------------------------------------------------------------------------
+# install netdata logrotate
+
+install_netdata_logrotate() {
+	if [ ${UID} -eq 0 ]; then
+		if [ -d /etc/logrotate.d ]; then
+			if [ ! -f /etc/logrotate.d/netdata ]; then
+				run cp system/netdata.logrotate /etc/logrotate.d/netdata
+			fi
+
+			if [ -f /etc/logrotate.d/netdata ]; then
+				run chmod 644 /etc/logrotate.d/netdata
+			fi
+
+			return 0
+		fi
+	fi
+
+	return 1
+}
+
+# -----------------------------------------------------------------------------
+# download netdata.conf
+
+fix_netdata_conf() {
+	local owner="${1}"
+
+	if [ "${UID}" -eq 0 ]; then
+		run chown "${owner}" "${filename}"
+	fi
+	run chmod 0664 "${filename}"
+}
+
+generate_netdata_conf() {
+	local owner="${1}" filename="${2}" url="${3}"
+
+	if [ ! -s "${filename}" ]; then
+		cat >"${filename}" <<EOFCONF
+# netdata can generate its own config file.
+# Get it with:
+#
+# curl -s -o ${filename} "${url}"
+EOFCONF
+		fix_netdata_conf "${owner}"
+	fi
+}
+
+download_netdata_conf() {
+	local owner="${1}" filename="${2}" url="${3}"
+
+	if [ ! -s "${filename}" ]; then
+		echo >&2
+		echo >&2 "-------------------------------------------------------------------------------"
+		echo >&2
+		echo >&2 "Downloading default configuration from netdata..."
+		sleep 5
+
+		# remove a possibly obsolete download
+		[ -f "${filename}.new" ] && rm "${filename}.new"
+
+		# disable a proxy to get data from the local netdata
+		export http_proxy=
+		export https_proxy=
+
+		# try curl
+		run curl -s -o "${filename}.new" "${url}"
+		ret=$?
+
+		if [ ${ret} -ne 0 -o ! -s "${filename}.new" ]; then
+			# try wget
+			run wget -O "${filename}.new" "${url}"
+			ret=$?
+		fi
+
+		if [ ${ret} -eq 0 -a -s "${filename}.new" ]; then
+			run mv "${filename}.new" "${filename}"
+			run_ok "New configuration saved for you to edit at ${filename}"
+		else
+			[ -f "${filename}.new" ] && rm "${filename}.new"
+			run_failed "Cannot download configuration from netdata daemon using url '${url}'"
+
+			generate_netdata_conf "${owner}" "${filename}" "${url}"
+		fi
+
+		fix_netdata_conf "${owner}"
+	fi
+}
+
+# -----------------------------------------------------------------------------
+# add netdata user and group
+
+NETDATA_WANTED_GROUPS="docker nginx varnish haproxy adm nsd proxy squid ceph nobody"
+NETDATA_ADDED_TO_GROUPS=""
+add_netdata_user_and_group() {
+	local homedir="${1}" g
+
+	if [ ${UID} -eq 0 ]; then
+		portable_add_group netdata || return 1
+		portable_add_user netdata "${homedir}" || return 1
+
+		for g in ${NETDATA_WANTED_GROUPS}; do
+			portable_add_user_to_group ${g} netdata && NETDATA_ADDED_TO_GROUPS="${NETDATA_ADDED_TO_GROUPS} ${g}"
+		done
+
+		[ ~netdata = / ] && cat <<USERMOD
+
+The netdata user has its home directory set to / - you may want to change it:
+
+	usermod -d "${homedir}" netdata
+
+USERMOD
+	fi
+
+	return 0
+}
diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh
new file mode 100755
--- /dev/null
+++ b/packaging/installer/kickstart-static64.sh
+#!/usr/bin/env sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ---------------------------------------------------------------------------------------------------------------------
+# library functions copied from packaging/installer/functions.sh
+
+which_cmd() {
+	# shellcheck disable=SC2230
+	which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null
+}
+
+check_cmd() {
+	which_cmd "${1}" >/dev/null 2>&1 && return 0
+	return 1
+}
+
+setup_terminal() {
+	TPUT_RESET=""
+	TPUT_BLACK=""
+	TPUT_RED=""
+	TPUT_GREEN=""
+	TPUT_YELLOW=""
+	TPUT_BLUE=""
+	TPUT_PURPLE=""
+	TPUT_CYAN=""
+	TPUT_WHITE=""
+	TPUT_BGBLACK=""
+	TPUT_BGRED=""
+	TPUT_BGGREEN=""
+	TPUT_BGYELLOW=""
+	TPUT_BGBLUE=""
+	TPUT_BGPURPLE=""
+	TPUT_BGCYAN=""
+	TPUT_BGWHITE=""
+	TPUT_BOLD=""
+	TPUT_DIM=""
+	TPUT_UNDERLINED=""
+	TPUT_BLINK=""
+	TPUT_INVERTED=""
+	TPUT_STANDOUT=""
+	TPUT_BELL=""
+	TPUT_CLEAR=""
+
+	# Is stderr on the terminal?
If not, then fail + test -t 2 || return 1 + + if check_cmd tput + then + if [ $(( $(tput colors 2>/dev/null) )) -ge 8 ] + then + # Enable colors + TPUT_RESET="$(tput sgr 0)" + TPUT_BLACK="$(tput setaf 0)" + TPUT_RED="$(tput setaf 1)" + TPUT_GREEN="$(tput setaf 2)" + TPUT_YELLOW="$(tput setaf 3)" + TPUT_BLUE="$(tput setaf 4)" + TPUT_PURPLE="$(tput setaf 5)" + TPUT_CYAN="$(tput setaf 6)" + TPUT_WHITE="$(tput setaf 7)" + TPUT_BGBLACK="$(tput setab 0)" + TPUT_BGRED="$(tput setab 1)" + TPUT_BGGREEN="$(tput setab 2)" + TPUT_BGYELLOW="$(tput setab 3)" + TPUT_BGBLUE="$(tput setab 4)" + TPUT_BGPURPLE="$(tput setab 5)" + TPUT_BGCYAN="$(tput setab 6)" + TPUT_BGWHITE="$(tput setab 7)" + TPUT_BOLD="$(tput bold)" + TPUT_DIM="$(tput dim)" + TPUT_UNDERLINED="$(tput smul)" + TPUT_BLINK="$(tput blink)" + TPUT_INVERTED="$(tput rev)" + TPUT_STANDOUT="$(tput smso)" + TPUT_BELL="$(tput bel)" + TPUT_CLEAR="$(tput clear)" + fi + fi + + return 0 +} +setup_terminal || echo >/dev/null + +progress() { + echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +run_ok() { + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" +} + +run_failed() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" +} + +ESCAPED_PRINT_METHOD= +printf "%q " test >/dev/null 2>&1 +[ $? -eq 0 ] && ESCAPED_PRINT_METHOD="printfq" +escaped_print() { + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ] + then + printf "%q " "${@}" + else + printf "%s" "${*}" + fi + return 0 +} + +run_logfile="/dev/null" +run() { + local user="${USER--}" dir="${PWD}" info info_console + + if [ "${UID}" = "0" ] + then + info="[root ${dir}]# " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " + else + info="[${user} ${dir}]$ " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " + fi + + printf >> "${run_logfile}" "${info}" + escaped_print >> "${run_logfile}" "${@}" + printf >> "${run_logfile}" " ... " + + printf >&2 "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" + escaped_print >&2 "${@}" + printf >&2 "${TPUT_RESET}\n" + + "${@}" + + local ret=$? + if [ ${ret} -ne 0 ] + then + run_failed + printf >> "${run_logfile}" "FAILED with exit code ${ret}\n" + else + run_ok + printf >> "${run_logfile}" "OK\n" + fi + + return ${ret} +} + + +# --------------------------------------------------------------------------------------------------------------------- + +fatal() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" + exit 1 +} + +# --------------------------------------------------------------------------------------------------------------------- + +if [ "$(uname -m)" != "x86_64" ] + then + fatal "Static binary versions of netdata are available only for 64bit Intel/AMD CPUs (x86_64), but yours is: $(uname -m)." +fi + +if [ "$(uname -s)" != "Linux" ] + then + fatal "Static binary versions of netdata are available only for Linux, but this system is $(uname -s)" +fi + +curl="$(which_cmd curl)" +wget="$(which_cmd wget)" + +# --------------------------------------------------------------------------------------------------------------------- + +progress "Checking the latest version of static build..." + +BASE='https://raw.githubusercontent.com/netdata/binary-packages/master' + +LATEST= +if [ ! -z "${curl}" -a -x "${curl}" ] +then + LATEST="$(run ${curl} "${BASE}/netdata-latest.gz.run")" +elif [ ! -z "${wget}" -a -x "${wget}" ] +then + LATEST="$(run ${wget} -O - "${BASE}/netdata-latest.gz.run")" +else + fatal "curl or wget are needed for this script to work." 
+fi + +if [ -z "${LATEST}" ] + then + fatal "Cannot find the latest static binary version of netdata." +fi + +# --------------------------------------------------------------------------------------------------------------------- + +progress "Downloading static netdata binary: ${LATEST}" + +ret=1 +if [ ! -z "${curl}" -a -x "${curl}" ] +then + run ${curl} "${BASE}/${LATEST}" >"/tmp/${LATEST}" + ret=$? +elif [ ! -z "${wget}" -a -x "${wget}" ] +then + run ${wget} -O "/tmp/${LATEST}" "${BASE}/${LATEST}" + ret=$? +else + fatal "curl or wget are needed for this script to work." +fi + +if [ ${ret} -ne 0 -o ! -s "/tmp/${LATEST}" ] + then + fatal "Failed to download the latest static binary version of netdata." +fi + +# --------------------------------------------------------------------------------------------------------------------- + +opts= +inner_opts= +while [ ! -z "${1}" ] +do + if [ "${1}" = "--dont-wait" -o "${1}" = "--non-interactive" -o "${1}" = "--accept" ] + then + opts="${opts} --accept" + elif [ "${1}" = "--dont-start-it" ] + then + inner_opts="${inner_opts} ${1}" + else + echo >&2 "Unknown option '${1}'" + exit 1 + fi + shift +done +[ ! -z "${inner_opts}" ] && inner_opts="-- ${inner_opts}" + +# --------------------------------------------------------------------------------------------------------------------- + +progress "Installing netdata" + +sudo= +[ "${UID}" != "0" ] && sudo="sudo" +run ${sudo} sh "/tmp/${LATEST}" ${opts} ${inner_opts} + +if [ $? -eq 0 ] + then + rm "/tmp/${LATEST}" +else + echo >&2 "NOTE: did not remove: /tmp/${LATEST}" +fi diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh new file mode 100755 index 000000000..2a5c874da --- /dev/null +++ b/packaging/installer/kickstart.sh @@ -0,0 +1,272 @@ +#!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later +# +# Run me with: +# +# bash <(curl -Ss https://my-netdata.io/kickstart.sh) +# +# or (to install all netdata dependencies): +# +# bash <(curl -Ss https://my-netdata.io/kickstart.sh) all +# +# Other options: +# --dont-wait do not prompt for user input +# --non-interactive do not prompt for user input +# --no-updates do not install script for daily updates +# +# This script will: +# +# 1. install all netdata compilation dependencies +# using the package manager of the system +# +# 2. download netdata nightly package to temporary directory +# +# 3. install netdata + +# shellcheck disable=SC2039,SC2059,SC2086 + +# External files +PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata-demo-site/master/install-required-packages.sh" +NIGHTLY_PACKAGE_TARBALL="https://storage.googleapis.com/netdata-nightlies/netdata-latest.tar.gz" +NIGHTLY_PACKAGE_CHECKSUM="https://storage.googleapis.com/netdata-nightlies/sha256sums.txt" + +# --------------------------------------------------------------------------------------------------------------------- +# library functions copied from packaging/installer/functions.sh + +setup_terminal() { + TPUT_RESET="" + TPUT_YELLOW="" + TPUT_WHITE="" + TPUT_BGRED="" + TPUT_BGGREEN="" + TPUT_BOLD="" + TPUT_DIM="" + + # Is stderr on the terminal? 
If not, then fail + test -t 2 || return 1 + + if command -v tput >/dev/null 2>&1; then + if [ $(($(tput colors 2>/dev/null))) -ge 8 ]; then + # Enable colors + TPUT_RESET="$(tput sgr 0)" + TPUT_YELLOW="$(tput setaf 3)" + TPUT_WHITE="$(tput setaf 7)" + TPUT_BGRED="$(tput setab 1)" + TPUT_BGGREEN="$(tput setab 2)" + TPUT_BOLD="$(tput bold)" + TPUT_DIM="$(tput dim)" + fi + fi + + return 0 +} + +progress() { + echo >&2 " --- ${TPUT_DIM}${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +escaped_print() { + if printf "%q " test >/dev/null 2>&1; then + printf "%q " "${@}" + else + printf "%s" "${*}" + fi + return 0 +} + +run() { + local dir="${PWD}" info_console + + if [ "${UID}" = "0" ]; then + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " + else + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " + fi + + escaped_print "${info_console}${TPUT_BOLD}${TPUT_YELLOW}" "${@}" "${TPUT_RESET}\n" >&2 + + "${@}" + + local ret=$? + if [ ${ret} -ne 0 ]; then + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET} ${*} \n\n" + else + printf >&2 "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET} ${*} \n\n" + fi + + return ${ret} +} + +warning() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} WARNING ${TPUT_RESET} ${*} \n\n" + if [ "${INTERACTIVE}" = "0" ]; then + fatal "Stopping due to non-interactive mode. Fix the issue or retry installation in an interactive mode." + else + read -r -p "Press ENTER to attempt netdata installation > " + progress "OK, let's give it a try..." + fi +} + +fatal() { + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" + exit 1 +} + +download() { + url="${1}" + dest="${2}" + if command -v wget >/dev/null 2>&1; then + run wget -O - "${url}" >"${dest}" || fatal "Cannot download ${url}" + elif command -v curl >/dev/null 2>&1; then + run curl "${url}" >"${dest}" || fatal "Cannot download ${url}" + else + fatal "I need curl or wget to proceed, but neither is available on this system." + fi +} + +detect_bash4() { + bash="${1}" + if [ -z "${BASH_VERSION}" ]; then + # we don't run under bash + if [ -n "${bash}" ] && [ -x "${bash}" ]; then + # shellcheck disable=SC2016 + BASH_MAJOR_VERSION=$(${bash} -c 'echo "${BASH_VERSINFO[0]}"') + fi + else + # we run under bash + BASH_MAJOR_VERSION="${BASH_VERSINFO[0]}" + fi + + if [ -z "${BASH_MAJOR_VERSION}" ]; then + echo >&2 "No BASH is available on this system" + return 1 + elif [ $((BASH_MAJOR_VERSION)) -lt 4 ]; then + echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION}" + return 1 + fi + return 0 +} + +umask 022 + +sudo="" +[ -z "${UID}" ] && UID="$(id -u)" +[ "${UID}" -ne "0" ] && sudo="sudo" +export PATH="${PATH}:/usr/local/bin:/usr/local/sbin" + +setup_terminal || echo >/dev/null + +# --------------------------------------------------------------------------------------------------------------------- +# try to update using autoupdater in the first place + +updater="" +[ -x /etc/periodic/daily/netdata-updater ] && updater=/etc/periodic/daily/netdata-updater +[ -x /etc/cron.daily/netdata-updater ] && updater=/etc/cron.daily/netdata-updater +if [ -L "${updater}" ]; then + # remove old updater (symlink) + run "${sudo}" rm -f "${updater}" + updater="" +fi +if [ -n "${updater}" ]; then + # attempt to run the updater, to respect any compilation settings already in place + progress "Re-installing netdata..." 
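+  # Note: the "-f" flag below is assumed (from the "forcefully update" error
+  # text here) to force the installed updater to rebuild and reinstall
+  # netdata even when it believes the installed version is already current.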
+ run "${sudo}" "${updater}" -f || fatal "Failed to forcefully update netdata" + exit 0 +fi + +# --------------------------------------------------------------------------------------------------------------------- +# install required system packages + +INTERACTIVE=1 +PACKAGES_INSTALLER_OPTIONS="netdata" +NETDATA_INSTALLER_OPTIONS="" +NETDATA_UPDATES="--auto-update" +while [ -n "${1}" ]; do + if [ "${1}" = "all" ]; then + PACKAGES_INSTALLER_OPTIONS="netdata-all" + shift 1 + elif [ "${1}" = "--dont-wait" ] || [ "${1}" = "--non-interactive" ]; then + INTERACTIVE=0 + shift 1 + elif [ "${1}" = "--no-updates" ]; then + # echo >&2 "netdata will not auto-update" + NETDATA_UPDATES= + shift 1 + else + break + fi +done + +if [ "${INTERACTIVE}" = "0" ]; then + PACKAGES_INSTALLER_OPTIONS="--dont-wait --non-interactive ${PACKAGES_INSTALLER_OPTIONS}" + NETDATA_INSTALLER_OPTIONS="--dont-wait" +fi + +# --------------------------------------------------------------------------------------------------------------------- +# detect system parameters and install dependencies + +SYSTEM="$(uname -s)" +OS="$(uname -o)" +MACHINE="$(uname -m)" + +cat </dev/null 2>&1; then + failed "Tarball checksum validation failed. Stopping netdata installation and leaving tarball in ${tmpdir}" +fi +run tar -xf netdata-latest.tar.gz +rm -rf netdata-latest.tar.gz >/dev/null 2>&1 +cd netdata-* || fatal "Cannot cd to netdata source tree" + +# --------------------------------------------------------------------------------------------------------------------- +# install netdata from source + +if [ -x netdata-installer.sh ]; then + progress "Installing netdata..." + run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} "${@}" || fatal "netdata-installer.sh exited with error" + rm -rf "${tmpdir}" >/dev/null 2>&1 +else + fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${tmpdir}" +fi diff --git a/packaging/installer/netdata-uninstaller.sh b/packaging/installer/netdata-uninstaller.sh new file mode 100755 index 000000000..96dd62906 --- /dev/null +++ b/packaging/installer/netdata-uninstaller.sh @@ -0,0 +1,169 @@ +#!/usr/bin/env bash +#shellcheck disable=SC2181 + +# this script will uninstall netdata + +# Variables needed by script and taken from '.environment' file: +# - NETDATA_PREFIX +# - NETDATA_ADDED_TO_GROUPS + +usage="$(basename "$0") [-h] [-f ] -- program to calculate the answer to life, the universe and everything + +where: + -e, --env path to environment file (defauls to '/etc/netdata/.environment' + -f, --force force uninstallation and do not ask any questions + -h show this help text + -y, --yes flag needs to be set to proceed with uninstallation" + +FILE_REMOVAL_STATUS=0 +ENVIRONMENT_FILE="/etc/netdata/.environment" +INTERACTIVITY="-i" +YES=0 +while :; do + case "$1" in + -h | --help) + echo "$usage" >&2 + exit 1 + ;; + -f | --force) + INTERACTIVITY="-f" + shift + ;; + -y | --yes) + YES=1 + shift + ;; + -e | --env) + ENVIRONMENT_FILE="$2" + shift 2 + ;; + -*) + echo "$usage" >&2 + exit 1 + ;; + *) break ;; + esac +done + +if [ "$YES" != "1" ]; then + echo "This script will REMOVE netdata from your system." + echo "Run it again with --yes to do it." + exit 1 +fi + +if [[ $EUID -ne 0 ]]; then + echo "This script SHOULD be run as root or otherwise it won't delete all installed components." + key="n" + read -r -s -n 1 -p "Do you want to continue as non-root user [y/n] ? 
" key + if [ "$key" != "y" ] && [ "$key" != "Y" ]; then + exit 1 + fi +fi + +function quit_msg() { + echo + if [ "$FILE_REMOVAL_STATUS" -eq 0 ]; then + echo "Something went wrong :(" + else + echo "Netdata files were successfully removed from your system" + fi +} + +function user_input() { + TEXT="$1" + if [ "${INTERACTIVITY}" == "-i" ]; then + read -r -p "$TEXT" >&2 + fi +} + +function rm_file() { + FILE="$1" + if [ -f "${FILE}" ]; then + rm -v ${INTERACTIVITY} "${FILE}" + fi +} + +function rm_dir() { + DIR="$1" + if [ -n "$DIR" ] && [ -d "$DIR" ]; then + user_input "Press ENTER to recursively delete directory '$DIR' > " + rm -v -f -R "${DIR}" + fi +} + +netdata_pids() { + local p myns ns + myns="$(readlink /proc/self/ns/pid 2>/dev/null)" + for p in \ + $(cat /var/run/netdata.pid 2>/dev/null) \ + $(cat /var/run/netdata/netdata.pid 2>/dev/null) \ + $(pidof netdata 2>/dev/null); do + + ns="$(readlink "/proc/${p}/ns/pid" 2>/dev/null)" + #shellcheck disable=SC2002 + if [ -z "${myns}" ] || [ -z "${ns}" ] || [ "${myns}" = "${ns}" ]; then + name="$(cat "/proc/${p}/stat" 2>/dev/null | cut -d '(' -f 2 | cut -d ')' -f 1)" + if [ "${name}" = "netdata" ]; then + echo "${p}" + fi + fi + done +} + +trap quit_msg EXIT + +#shellcheck source=/dev/null +source "${ENVIRONMENT_FILE}" || exit 1 + +#### STOP NETDATA +echo "Stopping a possibly running netdata..." +for p in $(netdata_pids); do + i=0 + while kill "${p}" 2>/dev/null; do + if [ "$i" -gt 30 ]; then + echo "Forcefully stopping netdata with pid ${p}" + kill -9 "${p}" + sleep 2 + break + fi + sleep 1 + i=$((i + 1)) + done +done +sleep 2 + +#### REMOVE NETDATA FILES +rm_file /etc/logrotate.d/netdata +rm_file /etc/systemd/system/netdata.service +rm_file /lib/systemd/system/netdata.service +rm_file /usr/lib/systemd/system/netdata.service +rm_file /etc/init.d/netdata +rm_file /etc/periodic/daily/netdata-updater +rm_file /etc/cron.daily/netdata-updater + +if [ -n "${NETDATA_PREFIX}" ] && [ -d "${NETDATA_PREFIX}" ]; then + rm_dir "${NETDATA_PREFIX}" +else + rm_file "/usr/sbin/netdata" + rm_dir "/usr/share/netdata" + rm_dir "/usr/libexec/netdata" + rm_dir "/var/lib/netdata" + rm_dir "/var/cache/netdata" + rm_dir "/var/log/netdata" + rm_dir "/etc/netdata" +fi + +FILE_REMOVAL_STATUS=1 + +#### REMOVE NETDATA USER & GROUP +if [ -n "$NETDATA_ADDED_TO_GROUPS" ]; then + user_input "Press ENTER to delete 'netdata' from following groups: '$NETDATA_ADDED_TO_GROUPS' > " + for group in $NETDATA_ADDED_TO_GROUPS; do + gpasswd -d netdata "${group}" + done +fi + +user_input "Press ENTER to delete 'netdata' system user > " +userdel -f netdata || : +user_input "Press ENTER to delete 'netdata' system group > " +groupdel -f netdata || : diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh new file mode 100644 index 000000000..96f7c1270 --- /dev/null +++ b/packaging/installer/netdata-updater.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +#shellcheck disable=SC2164 + +# this script will uninstall netdata + +# Variables needed by script: +# - PATH +# - CFLAGS +# - NETDATA_CONFIGURE_OPTIONS +# - REINSTALL_COMMAND +# - NETDATA_TARBALL_URL +# - NETDATA_TARBALL_CHECKSUM_URL +# - NETDATA_TARBALL_CHECKSUM + + +# Usually stored in /etc/netdata/.environment +: "${ENVIRONMENT_FILE:=THIS_SHOULD_BE_REPLACED_BY_INSTALLER_SCRIPT}" + +# shellcheck source=/dev/null +source "${ENVIRONMENT_FILE}" || exit 1 + +if [ "${INSTALL_UID}" != "$(id -u)" ]; then + echo >&2 "You are running this script as user with uid $(id -u). 
We recommend to run this script as root (user with uid 0)" + exit 1 +fi + +# signal netdata to start saving its database +# this is handy if your database is big +pids=$(pidof netdata) +do_not_start= +if [ -n "${pids}" ]; then + #shellcheck disable=SC2086 + kill -USR1 ${pids} +else + # netdata is currently not running, so do not start it after updating + do_not_start="--dont-start-it" +fi + +tmp= +if [ -t 2 ]; then + # we are running on a terminal + # open fd 3 and send it to stderr + exec 3>&2 +else + # we are headless + # create a temporary file for the log + tmp=$(mktemp /tmp/netdata-updater.log.XXXXXX) + # open fd 3 and send it to tmp + exec 3>"${tmp}" +fi + +info() { + echo >&3 "$(date) : INFO: " "${@}" +} + +error() { + echo >&3 "$(date) : ERROR: " "${@}" +} + +# this is what we will do if it fails (head-less only) +failed() { + error "FAILED TO UPDATE NETDATA : ${1}" + + if [ -n "${tmp}" ]; then + cat >&2 "${tmp}" + rm "${tmp}" + fi + exit 1 +} + +update() { + [ -z "${tmp}" ] && info "Running on a terminal - (this script also supports running headless from crontab)" + + # Check if tmp is mounted as noexec + if grep -Eq '^[^ ]+ /tmp [^ ]+ ([^ ]*,)?noexec[, ]' /proc/mounts; then + pattern="/opt/netdata-updater-XXXXXX" + else + pattern="/tmp/netdata-updater-XXXXXX" + fi + + dir=$(mktemp -d "$pattern") + + cd "$dir" + + wget "${NETDATA_TARBALL_CHECKSUM_URL}" -O sha256sum.txt >&3 2>&3 + if grep "${NETDATA_TARBALL_CHECKSUM}" sha256sum.txt >&3 2>&3; then + info "Newest version is already installed" + exit 0 + fi + + wget "${NETDATA_TARBALL_URL}" -O netdata-latest.tar.gz >&3 2>&3 + if ! grep netdata-latest.tar.gz sha256sum.txt | sha256sum --check - >&3 2>&3; then + failed "Tarball checksum validation failed. Stopping netdata upgrade and leaving tarball in ${dir}" + fi + NEW_CHECKSUM="$(sha256sum netdata-latest.tar.gz 2>/dev/null| cut -d' ' -f1)" + tar -xf netdata-latest.tar.gz >&3 2>&3 + rm netdata-latest.tar.gz >&3 2>&3 + cd netdata-* + + info "Re-installing netdata..." + ${REINSTALL_COMMAND} --dont-wait ${do_not_start} >&3 2>&3 || failed "FAILED TO COMPILE/INSTALL NETDATA" + sed -i '/NETDATA_TARBALL/d' "${ENVIRONMENT_FILE}" + cat <>"${ENVIRONMENT_FILE}" +NETDATA_TARBALL_URL="$NETDATA_TARBALL_URL" +NETDATA_TARBALL_CHECKSUM_URL="$NETDATA_TARBALL_CHECKSUM_URL" +NETDATA_TARBALL_CHECKSUM="$NEW_CHECKSUM" +EOF + + rm -rf "${dir}" >&3 2>&3 + [ -n "${tmp}" ] && rm "${tmp}" && tmp= + return 0 +} + +# the installer updates this script - so we run and exit in a single line +update && exit 0 diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md new file mode 100644 index 000000000..9fb36e771 --- /dev/null +++ b/packaging/maintainers/README.md @@ -0,0 +1,75 @@ +# Package Maintainers + +This page tracks the package maintainers for netdata, for various operating systems and versions. + +> Feel free to update it, so that it reflects the current status. 
+ + +--- + +## Official Linux Distributions + +| Linux Distribution | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Arch Linux | Release | @svenstaro | [netdata @ Arch Linux](https://www.archlinux.org/packages/community/x86_64/netdata/) | +| Arch Linux AUR | Git | @sanskritfritz | [netdata @ AUR](https://aur.archlinux.org/packages/netdata-git/) | +| Gentoo Linux | Release + Git | @candrews | [netdata @ gentoo](https://github.com/gentoo/gentoo/tree/master/net-analyzer/netdata) | +| Debian | Release | @lhw @FedericoCeratto | [netdata @ debian](http://salsa.debian.org/debian/netdata) | +| Slackware | Release | @willysr | [netdata @ slackbuilds](https://slackbuilds.org/repository/14.2/system/netdata/) | +| Ubuntu | | | | +| Red Hat / Fedora / Centos | | | | +| SUSE SLE / openSUSE Tumbleweed & Leap | | | [netdata @ SUSE OpenBuildService](https://software.opensuse.org/package/netdata) | + +--- +## FreeBSD + +| System | Initial PR | Core Developer | Package Maintainer +|:-:|:-:|:-:|:-:| +FreeBSD|#1321|@vlvkobal|@mmokhi + +--- +## MacOS + +| System | URL | Core Developer | Package Maintainer +|:-:|:-:|:-:|:-:| +MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen + +--- +## Unofficial Linux Packages + +| Linux Distribution | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Ubuntu | Release | @gslin | [netdata @ gslin ppa](https://launchpad.net/~gslin/+archive/ubuntu/netdata) https://github.com/netdata/netdata/issues/69#issuecomment-217458543 | + +--- +## Embedded Linux + +| Embedded Linux | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| ASUSTOR NAS | ? | William Lin | https://www.asustor.com/apps/app_detail?id=532 | +| OpenWRT | Release | @nitroshift | [openwrt package](https://github.com/openwrt/packages/tree/master/admin/netdata) | +| ReadyNAS | Release | @NAStools | https://github.com/nastools/netdata | +| QNAP | Release | QNAP_Stephane | https://forum.qnap.com/viewtopic.php?t=121518 | +| DietPi | Release | @Fourdee | https://github.com/Fourdee/DietPi | + +--- +## Linux Containers + +| Containers | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Docker | Git | @titpetric | https://github.com/titpetric/netdata | + +--- +## Automation Systems + +| Automation Systems | Netdata Version | Maintainer | Related URL | +| :-: | :-: | :-: | :-- | +| Ansible | git | @jffz | https://galaxy.ansible.com/jffz/netdata/ | +| Chef | ? | @sergiopena | https://github.com/sergiopena/netdata-cookbook | + +--- +## Packages summary from repology.org + +[![Packaging status](https://repology.org/badge/vertical-allrepos/netdata.svg)](https://repology.org/metapackage/netdata/versions) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fpackaging%2Fmaintainers%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/packaging/makeself/README.md b/packaging/makeself/README.md new file mode 100644 index 000000000..eb4c380b1 --- /dev/null +++ b/packaging/makeself/README.md @@ -0,0 +1,48 @@ +# netdata static binary build + +To build the static binary 64-bit distribution package, run: + +```bash +$ cd /path/to/netdata.git +$ ./packaging/makeself/build-x86_64-static.sh +``` + +The program will: + +1. setup a new docker container with Alpine Linux +2. 
install the required alpine packages (the build environment, needed libraries, etc)
+3. download and compile third party apps that are packaged with netdata (`bash`, `curl`, etc)
+4. compile netdata
+
+Once finished, a file named `netdata-vX.X.X-gGITHASH-x86_64-DATE-TIME.run` will be created in the current directory. This is the netdata binary package that can be run to install netdata on any other computer.
+
+---
+
+## building binaries with debug info
+
+To build netdata binaries with debugging / tracing information in them, use:
+
+```bash
+$ cd /path/to/netdata.git
+$ ./packaging/makeself/build-x86_64-static.sh debug
+```
+
+These binaries are not optimized (they are a bit slower), they have certain features disabled (like log flood protection), other features enabled (like `debug flags`), and they are not stripped (the binary files are bigger, since they now include source code tracing information).
+
+#### debugging netdata binaries
+
+Once you have installed a binary package with debugging info, you will need to install `valgrind` and run this command to start netdata:
+
+```bash
+PATH="/opt/netdata/bin:${PATH}" valgrind --undef-value-errors=no /opt/netdata/bin/srv/netdata -D
+```
+
+The above command will run netdata under `valgrind`. While netdata runs under `valgrind` it will be 10x slower and use a lot more memory.
+
+If netdata crashes, `valgrind` will print a stack trace of the issue. Open a github issue to let us know.
+
+To stop netdata while it runs under `valgrind`, press Control-C on the console.
+
+> If you omit the parameter `--undef-value-errors=no` to valgrind, you will get hundreds of errors about conditional jumps that depend on uninitialized values. This is normal. Valgrind has heuristics to prevent it from printing such errors for system libraries, but for the static netdata binary, all the required libraries are built into netdata. So, valgrind cannot apply its heuristics and prints them.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fmakeself%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh
new file mode 100755
index 000000000..69ddf2bf5
--- /dev/null
+++ b/packaging/makeself/build-x86_64-static.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+. $(dirname "$0")/../installer/functions.sh || exit 1
+
+set -e
+
+DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine37"
+
+if !
sudo docker inspect "${DOCKER_CONTAINER_NAME}" >/dev/null 2>&1 +then + # To run interactively: + # sudo docker run -it netdata-package-x86_64-static /bin/sh + # (add -v host-dir:guest-dir:rw arguments to mount volumes) + # + # To remove images in order to re-create: + # sudo docker rm -v $(sudo docker ps -a -q -f status=exited) + # sudo docker rmi netdata-package-x86_64-static + # + # This command maps the current directory to + # /usr/src/netdata.git + # inside the container and runs the script install-alpine-packages.sh + # (also inside the container) + # + run sudo docker run -v $(pwd):/usr/src/netdata.git:rw alpine:3.7 \ + /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh + + # save the changes made permanently + id=$(sudo docker ps -l -q) + run sudo docker commit ${id} "${DOCKER_CONTAINER_NAME}" +fi + +# Run the build script inside the container +run sudo docker run -a stdin -a stdout -a stderr -i -t -v \ + $(pwd):/usr/src/netdata.git:rw \ + "${DOCKER_CONTAINER_NAME}" \ + /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" + +if [ "${USER}" ] + then + sudo chown -R "${USER}" . +fi diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh new file mode 100755 index 000000000..e5804c523 --- /dev/null +++ b/packaging/makeself/build.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later + +# ----------------------------------------------------------------------------- +# parse command line arguments + +export NETDATA_BUILD_WITH_DEBUG=0 + +while [ ! -z "${1}" ] +do + case "${1}" in + debug) + export NETDATA_BUILD_WITH_DEBUG=1 + ;; + + *) + ;; + esac + + shift +done + + +# ----------------------------------------------------------------------------- + +# First run install-alpine-packages.sh under alpine linux to install +# the required packages. build-x86_64-static.sh will do this for you +# using docker. + +cd $(dirname "$0") || exit 1 + +# if we don't run inside the netdata repo +# download it and run from it +if [ ! -f ../../netdata-installer.sh ] +then + git clone https://github.com/netdata/netdata.git netdata.git || exit 1 + cd netdata.git/makeself || exit 1 + ./build.sh "$@" + exit $? +fi + +cat >&2 < " + +if [ ! -d tmp ] + then + mkdir tmp || exit 1 +fi + +./run-all-jobs.sh "$@" +exit $? diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh new file mode 100755 index 000000000..6c68e5907 --- /dev/null +++ b/packaging/makeself/functions.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# ----------------------------------------------------------------------------- + +# allow running the jobs by hand +[ -z "${NETDATA_BUILD_WITH_DEBUG}" ] && export NETDATA_BUILD_WITH_DEBUG=0 +[ -z "${NETDATA_INSTALL_PATH}" ] && export NETDATA_INSTALL_PATH="${1-/opt/netdata}" +[ -z "${NETDATA_MAKESELF_PATH}" ] && export NETDATA_MAKESELF_PATH="$(dirname "${0}")/../.." +[ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] && export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" +[ -z "${NETDATA_SOURCE_PATH}" ] && export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.." 
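+# Note: "${1-/opt/netdata}" above uses the unset-only default form, so running
+# a job with no argument falls back to /opt/netdata, while an explicitly empty
+# first argument is preserved (unlike "${1:-/opt/netdata}", which would also
+# replace an empty value).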
+export NULL= + +# make sure the path does not end with / +if [ "${NETDATA_INSTALL_PATH:$(( ${#NETDATA_INSTALL_PATH} - 1)):1}" = "/" ] + then + export NETDATA_INSTALL_PATH="${NETDATA_INSTALL_PATH:0:$(( ${#NETDATA_INSTALL_PATH} - 1))}" +fi + +# find the parent directory +export NETDATA_INSTALL_PARENT="$(dirname "${NETDATA_INSTALL_PATH}")" + +# ----------------------------------------------------------------------------- + +# bash strict mode +set -euo pipefail + +# ----------------------------------------------------------------------------- + +fetch() { + local dir="${1}" url="${2}" + local tar="${dir}.tar.gz" + + if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ] + then + run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}" + fi + + if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ] + then + cd "${NETDATA_MAKESELF_PATH}/tmp" + run tar -zxpf "${tar}" + cd - + fi + + run cd "${NETDATA_MAKESELF_PATH}/tmp/${dir}" +} + +# ----------------------------------------------------------------------------- + +# load the functions of the netdata-installer.sh +. "${NETDATA_SOURCE_PATH}/packaging/installer/functions.sh" + +# ----------------------------------------------------------------------------- + +# debug +echo "ME=${0}" +echo "NETDATA_INSTALL_PARENT=${NETDATA_INSTALL_PARENT}" +echo "NETDATA_INSTALL_PATH=${NETDATA_INSTALL_PATH}" +echo "NETDATA_MAKESELF_PATH=${NETDATA_MAKESELF_PATH}" +echo "NETDATA_SOURCE_PATH=${NETDATA_SOURCE_PATH}" +echo "PROCESSORS=${SYSTEM_CPUS}" diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh new file mode 100755 index 000000000..695be4d4f --- /dev/null +++ b/packaging/makeself/install-alpine-packages.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env sh +# SPDX-License-Identifier: GPL-3.0-or-later + +# this script should be running in alpine linux +# install the required packages +apk update +apk add --no-cache \ + bash \ + wget \ + curl \ + ncurses \ + git \ + netcat-openbsd \ + alpine-sdk \ + autoconf \ + automake \ + gcc \ + make \ + libtool \ + pkgconfig \ + util-linux-dev \ + openssl-dev \ + gnutls-dev \ + zlib-dev \ + libmnl-dev \ + libnetfilter_acct-dev \ + || exit 1 diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh new file mode 100755 index 000000000..bfcbe720a --- /dev/null +++ b/packaging/makeself/install-or-update.sh @@ -0,0 +1,225 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/functions.sh + +export LC_ALL=C +umask 002 + +# Be nice on production environments +renice 19 $$ >/dev/null 2>/dev/null + +# ----------------------------------------------------------------------------- + +STARTIT=1 + +while [ ! -z "${1}" ] +do + if [ "${1}" = "--dont-start-it" ] + then + STARTIT=0 + else + echo >&2 "Unknown option '${1}'. Ignoring it." + fi + shift +done + +deleted_stock_configs=0 +if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ] +then + + # ----------------------------------------------------------------------------- + progress "Deleting stock configuration files from user configuration directory" + + declare -A configs_signatures=() + source "system/configs.signatures" + + if [ ! 
-d etc/netdata ] + then + run mkdir -p etc/netdata + fi + + md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" + for x in $(find etc -type f) + do + # find it relative filename + f="${x/etc\/netdata\//}" + + # find the stock filename + t="${f/.conf.old/.conf}" + t="${t/.conf.orig/.conf}" + + if [ ! -z "${md5sum}" ] + then + # find the checksum of the existing file + md5="$( ${md5sum} <"${x}" | cut -d ' ' -f 1)" + #echo >&2 "md5: ${md5}" + + # check if it matches + if [ "${configs_signatures[${md5}]}" = "${t}" ] + then + # it matches the default + run rm -f "${x}" + deleted_stock_configs=$(( deleted_stock_configs + 1 )) + fi + fi + done + + touch "etc/netdata/.installer-cleanup-of-stock-configs-done" +fi + +# ----------------------------------------------------------------------------- +progress "Add user netdata to required user groups" + +NETDATA_USER="root" +NETDATA_GROUP="root" +add_netdata_user_and_group "/opt/netdata" +if [ $? -eq 0 ] + then + NETDATA_USER="netdata" + NETDATA_GROUP="netdata" +else + run_failed "Failed to add netdata user and group" +fi + + +# ----------------------------------------------------------------------------- +progress "Check SSL certificates paths" + +if [ ! -f "/etc/ssl/certs/ca-certificates.crt" ] +then + if [ ! -f /opt/netdata/.curlrc ] + then + cacert= + + # CentOS + [ -f "/etc/ssl/certs/ca-bundle.crt" ] && cacert="/etc/ssl/certs/ca-bundle.crt" + + if [ ! -z "${cacert}" ] + then + echo "Creating /opt/netdata/.curlrc with cacert=${cacert}" + echo >/opt/netdata/.curlrc "cacert=${cacert}" + else + run_failed "Failed to find /etc/ssl/certs/ca-certificates.crt" + fi + fi +fi + + +# ----------------------------------------------------------------------------- +progress "Install logrotate configuration for netdata" + +install_netdata_logrotate || run_failed "Cannot install logrotate file for netdata." + + +# ----------------------------------------------------------------------------- +progress "Install netdata at system init" + +install_netdata_service || run_failed "Cannot install netdata init service." + + +# ----------------------------------------------------------------------------- +progress "creating quick links" + +dir_should_be_link() { + local p="${1}" t="${2}" d="${3}" old + + old="${PWD}" + cd "${p}" || return 0 + + if [ -e "${d}" ] + then + if [ -h "${d}" ] + then + run rm "${d}" + else + run mv -f "${d}" "${d}.old.$$" + fi + fi + + run ln -s "${t}" "${d}" + cd "${old}" +} + +dir_should_be_link . bin sbin +dir_should_be_link usr ../bin bin +dir_should_be_link usr ../bin sbin +dir_should_be_link usr . local + +dir_should_be_link . etc/netdata netdata-configs +dir_should_be_link . usr/share/netdata/web netdata-web-files +dir_should_be_link . usr/libexec/netdata netdata-plugins +dir_should_be_link . var/lib/netdata netdata-dbs +dir_should_be_link . var/cache/netdata netdata-metrics +dir_should_be_link . var/log/netdata netdata-logs + +dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig + +if [ ${deleted_stock_configs} -gt 0 ] +then + dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES" +fi + + +# ----------------------------------------------------------------------------- + +progress "create user config directories" + +for x in "python.d" "charts.d" "node.d" "health.d" "statsd.d" +do + if [ ! 
-d "etc/netdata/${x}" ] + then + run mkdir -p "etc/netdata/${x}" || exit 1 + fi +done + + +# ----------------------------------------------------------------------------- +progress "fix permissions" + +run chmod g+rx,o+rx /opt +run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata + + +# ----------------------------------------------------------------------------- + +progress "fix plugin permissions" + +for x in apps.plugin freeipmi.plugin cgroup-network +do + f="usr/libexec/netdata/plugins.d/${x}" + + if [ -f "${f}" ] + then + run chown root:${NETDATA_GROUP} "${f}" + run chmod 4750 "${f}" + fi +done + +# fix the fping binary +if [ -f bin/fping ] +then + run chown root:${NETDATA_GROUP} bin/fping + run chmod 4750 bin/fping +fi + + +# ----------------------------------------------------------------------------- + +if [ ${STARTIT} -eq 1 ] +then + progress "starting netdata" + + restart_netdata "/opt/netdata/bin/netdata" + if [ $? -eq 0 ] + then + download_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" + netdata_banner "is installed and running now!" + else + generate_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" + netdata_banner "is installed now!" + fi +else + generate_netdata_conf "${NETDATA_USER}:${NETDATA_GROUP}" "/opt/netdata/etc/netdata/netdata.conf" "http://localhost:19999/netdata.conf" + netdata_banner "is installed now!" +fi diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh new file mode 100755 index 000000000..06dc82f29 --- /dev/null +++ b/packaging/makeself/jobs/10-prepare-destination.install.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. $(dirname "${0}")/../functions.sh "${@}" || exit 1 + +[ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old" +[ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old" + +run mkdir -p "${NETDATA_INSTALL_PATH}/bin" +run mkdir -p "${NETDATA_INSTALL_PATH}/usr" +run cd "${NETDATA_INSTALL_PATH}" +run ln -s bin sbin +run cd "${NETDATA_INSTALL_PATH}/usr" +run ln -s ../bin bin +run ln -s ../sbin sbin +run ln -s . local diff --git a/packaging/makeself/jobs/50-bash-4.4.18.install.sh b/packaging/makeself/jobs/50-bash-4.4.18.install.sh new file mode 100755 index 000000000..3bdf3e751 --- /dev/null +++ b/packaging/makeself/jobs/50-bash-4.4.18.install.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +. 
$(dirname "${0}")/../functions.sh "${@}" || exit 1 + +fetch "bash-4.4.18" "http://ftp.gnu.org/gnu/bash/bash-4.4.18.tar.gz" + +run ./configure \ + --prefix=${NETDATA_INSTALL_PATH} \ + --without-bash-malloc \ + --enable-static-link \ + --enable-net-redirections \ + --enable-array-variables \ + --disable-profiling \ + --disable-nls \ +# --disable-rpath \ +# --enable-alias \ +# --enable-arith-for-command \ +# --enable-array-variables \ +# --enable-brace-expansion \ +# --enable-casemod-attributes \ +# --enable-casemod-expansions \ +# --enable-command-timing \ +# --enable-cond-command \ +# --enable-cond-regexp \ +# --enable-directory-stack \ +# --enable-dparen-arithmetic \ +# --enable-function-import \ +# --enable-glob-asciiranges-default \ +# --enable-help-builtin \ +# --enable-job-control \ +# --enable-net-redirections \ +# --enable-process-substitution \ +# --enable-progcomp \ +# --enable-prompt-string-decoding \ +# --enable-readline \ +# --enable-select \ + + +run make clean +run make -j${SYSTEM_CPUS} + +cat >examples/loadables/Makefile <doc/Makefile <&2 "Cannot find version number. Create makeself executable from source code with git tree structure." + exit 1 +fi + +# ----------------------------------------------------------------------------- +# copy the files needed by makeself installation + +run mkdir -p "${NETDATA_INSTALL_PATH}/system" + +run cp \ + packaging/makeself/post-installer.sh \ + packaging/makeself/install-or-update.sh \ + packaging/installer/functions.sh \ + configs.signatures \ + system/netdata-init-d \ + system/netdata-lsb \ + system/netdata-openrc \ + system/netdata.logrotate \ + system/netdata.service \ + "${NETDATA_INSTALL_PATH}/system/" + + +# ----------------------------------------------------------------------------- +# create a wrapper to start our netdata with a modified path + +run mkdir -p "${NETDATA_INSTALL_PATH}/bin/srv" + +run mv "${NETDATA_INSTALL_PATH}/bin/netdata" \ + "${NETDATA_INSTALL_PATH}/bin/srv/netdata" || exit 1 + +cat >"${NETDATA_INSTALL_PATH}/bin/netdata" <"${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" + +run "${NETDATA_MAKESELF_PATH}/makeself.sh" \ + --gzip \ + --complevel 9 \ + --notemp \ + --needroot \ + --target "${NETDATA_INSTALL_PATH}" \ + --header "${NETDATA_MAKESELF_PATH}/makeself-header.sh" \ + --lsm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" \ + --license "${NETDATA_MAKESELF_PATH}/makeself-license.txt" \ + --help-header "${NETDATA_MAKESELF_PATH}/makeself-help-header.txt" \ + "${NETDATA_INSTALL_PATH}" \ + "${NETDATA_INSTALL_PATH}.gz.run" \ + "netdata, the real-time performance and health monitoring system" \ + ./system/post-installer.sh \ + ${NULL} + +run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" + +# ----------------------------------------------------------------------------- +# copy it to the netdata build dir + +FILE="netdata-${VERSION}.gz.run" + +run mkdir -p artifacts +run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}" + +[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run +run ln -s "artifacts/${FILE}" netdata-latest.gz.run + +echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'" diff --git a/packaging/makeself/makeself-header.sh b/packaging/makeself/makeself-header.sh new file mode 100755 index 000000000..d77e1717c --- /dev/null +++ b/packaging/makeself/makeself-header.sh @@ -0,0 +1,554 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +cat << EOF > "$archname" +#!/bin/sh +# This script was generated using Makeself $MS_VERSION + +ORIG_UMASK=\`umask\` +if test "$KEEP_UMASK" = n; then + umask 077 +fi + 
+CRCsum="$CRCsum" +MD5="$MD5sum" +TMPROOT=\${TMPDIR:=/tmp} +USER_PWD="\$PWD"; export USER_PWD + +label="$LABEL" +script="$SCRIPT" +scriptargs="$SCRIPTARGS" +licensetxt="$LICENSE" +helpheader='$HELPHEADER' +targetdir="$archdirname" +filesizes="$filesizes" +keep="$KEEP" +nooverwrite="$NOOVERWRITE" +quiet="n" +accept="n" +nodiskspace="n" +export_conf="$EXPORT_CONF" + +print_cmd_arg="" +if type printf > /dev/null; then + print_cmd="printf" +elif test -x /usr/ucb/echo; then + print_cmd="/usr/ucb/echo" +else + print_cmd="echo" +fi + +if test -d /usr/xpg4/bin; then + PATH=/usr/xpg4/bin:\$PATH + export PATH +fi + +unset CDPATH + +MS_Printf() +{ + \$print_cmd \$print_cmd_arg "\$1" +} + +MS_PrintLicense() +{ + if test x"\$licensetxt" != x; then + echo "\$licensetxt" + if test x"\$accept" != xy; then + while true + do + MS_Printf "Please type y to accept, n otherwise: " + read yn + if test x"\$yn" = xn; then + keep=n + eval \$finish; exit 1 + break; + elif test x"\$yn" = xy; then + break; + fi + done + fi + fi +} + +MS_diskspace() +{ + ( + df -kP "\$1" | tail -1 | awk '{ if (\$4 ~ /%/) {print \$3} else {print \$4} }' + ) +} + +MS_dd() +{ + blocks=\`expr \$3 / 1024\` + bytes=\`expr \$3 % 1024\` + dd if="\$1" ibs=\$2 skip=1 obs=1024 conv=sync 2> /dev/null | \\ + { test \$blocks -gt 0 && dd ibs=1024 obs=1024 count=\$blocks ; \\ + test \$bytes -gt 0 && dd ibs=1 obs=1024 count=\$bytes ; } 2> /dev/null +} + +MS_dd_Progress() +{ + if test x"\$noprogress" = xy; then + MS_dd \$@ + return \$? + fi + file="\$1" + offset=\$2 + length=\$3 + pos=0 + bsize=4194304 + while test \$bsize -gt \$length; do + bsize=\`expr \$bsize / 4\` + done + blocks=\`expr \$length / \$bsize\` + bytes=\`expr \$length % \$bsize\` + ( + dd ibs=\$offset skip=1 2>/dev/null + pos=\`expr \$pos \+ \$bsize\` + MS_Printf " 0%% " 1>&2 + if test \$blocks -gt 0; then + while test \$pos -le \$length; do + dd bs=\$bsize count=1 2>/dev/null + pcent=\`expr \$length / 100\` + pcent=\`expr \$pos / \$pcent\` + if test \$pcent -lt 100; then + MS_Printf "\b\b\b\b\b\b\b" 1>&2 + if test \$pcent -lt 10; then + MS_Printf " \$pcent%% " 1>&2 + else + MS_Printf " \$pcent%% " 1>&2 + fi + fi + pos=\`expr \$pos \+ \$bsize\` + done + fi + if test \$bytes -gt 0; then + dd bs=\$bytes count=1 2>/dev/null + fi + MS_Printf "\b\b\b\b\b\b\b" 1>&2 + MS_Printf " 100%% " 1>&2 + ) < "\$file" +} + +MS_Help() +{ + cat << EOH >&2 +\${helpheader}Makeself version $MS_VERSION + 1) Getting help or info about \$0 : + \$0 --help Print this message + \$0 --info Print embedded info : title, default target directory, embedded script ... + \$0 --lsm Print embedded lsm entry (or no LSM) + \$0 --list Print the list of files in the archive + \$0 --check Checks integrity of the archive + + 2) Running \$0 : + \$0 [options] [--] [additional arguments to embedded script] + with following options (in that order) + --confirm Ask before running embedded script + --quiet Do not print anything except error messages + --accept Accept the license + --noexec Do not run embedded script + --keep Do not erase target directory after running + the embedded script + --noprogress Do not show the progress during the decompression + --nox11 Do not spawn an xterm + --nochown Do not give the extracted files to the current user + --nodiskspace Do not check for available disk space + --target dir Extract directly to a target directory + directory path can be either absolute or relative + --tar arg1 [arg2 ...] 
Access the contents of the archive through the tar command + -- Following arguments will be passed to the embedded script +EOH +} + +MS_Check() +{ + OLD_PATH="\$PATH" + PATH=\${GUESS_MD5_PATH:-"\$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} + MD5_ARG="" + MD5_PATH=\`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum\` + test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which md5 || command -v md5 || type md5\` + test -x "\$MD5_PATH" || MD5_PATH=\`exec <&- 2>&-; which digest || command -v digest || type digest\` + PATH="\$OLD_PATH" + + if test x"\$quiet" = xn; then + MS_Printf "Verifying archive integrity..." + fi + offset=\`head -n $SKIP "\$1" | wc -c | tr -d " "\` + verb=\$2 + i=1 + for s in \$filesizes + do + crc=\`echo \$CRCsum | cut -d" " -f\$i\` + if test -x "\$MD5_PATH"; then + if test x"\`basename \$MD5_PATH\`" = xdigest; then + MD5_ARG="-a md5" + fi + md5=\`echo \$MD5 | cut -d" " -f\$i\` + if test x"\$md5" = x00000000000000000000000000000000; then + test x"\$verb" = xy && echo " \$1 does not contain an embedded MD5 checksum." >&2 + else + md5sum=\`MS_dd_Progress "\$1" \$offset \$s | eval "\$MD5_PATH \$MD5_ARG" | cut -b-32\`; + if test x"\$md5sum" != x"\$md5"; then + echo "Error in MD5 checksums: \$md5sum is different from \$md5" >&2 + exit 2 + else + test x"\$verb" = xy && MS_Printf " MD5 checksums are OK." >&2 + fi + crc="0000000000"; verb=n + fi + fi + if test x"\$crc" = x0000000000; then + test x"\$verb" = xy && echo " \$1 does not contain a CRC checksum." >&2 + else + sum1=\`MS_dd_Progress "\$1" \$offset \$s | CMD_ENV=xpg4 cksum | awk '{print \$1}'\` + if test x"\$sum1" = x"\$crc"; then + test x"\$verb" = xy && MS_Printf " CRC checksums are OK." >&2 + else + echo "Error in checksums: \$sum1 is different from \$crc" >&2 + exit 2; + fi + fi + i=\`expr \$i + 1\` + offset=\`expr \$offset + \$s\` + done + if test x"\$quiet" = xn; then + echo " All good." + fi +} + +UnTAR() +{ + if test x"\$quiet" = xn; then + tar \$1vf - $UNTAR_EXTRA 2>&1 || { echo " ... Extraction failed." > /dev/tty; kill -15 \$$; } + else + tar \$1f - $UNTAR_EXTRA 2>&1 || { echo Extraction failed. 
> /dev/tty; kill -15 \$$; } + fi +} + +finish=true +xterm_loop= +noprogress=$NOPROGRESS +nox11=$NOX11 +copy=$COPY +ownership=y +verbose=n + +initargs="\$@" + +while true +do + case "\$1" in + -h | --help) + MS_Help + exit 0 + ;; + -q | --quiet) + quiet=y + noprogress=y + shift + ;; + --accept) + accept=y + shift + ;; + --info) + echo Identification: "\$label" + echo Target directory: "\$targetdir" + echo Uncompressed size: $USIZE KB + echo Compression: $COMPRESS + echo Date of packaging: $DATE + echo Built with Makeself version $MS_VERSION on $OSTYPE + echo Build command was: "$MS_COMMAND" + if test x"\$script" != x; then + echo Script run after extraction: + echo " " \$script \$scriptargs + fi + if test x"$copy" = xcopy; then + echo "Archive will copy itself to a temporary location" + fi + if test x"$NEED_ROOT" = xy; then + echo "Root permissions required for extraction" + fi + if test x"$KEEP" = xy; then + echo "directory \$targetdir is permanent" + else + echo "\$targetdir will be removed after extraction" + fi + exit 0 + ;; + --dumpconf) + echo LABEL=\"\$label\" + echo SCRIPT=\"\$script\" + echo SCRIPTARGS=\"\$scriptargs\" + echo archdirname=\"$archdirname\" + echo KEEP=$KEEP + echo NOOVERWRITE=$NOOVERWRITE + echo COMPRESS=$COMPRESS + echo filesizes=\"\$filesizes\" + echo CRCsum=\"\$CRCsum\" + echo MD5sum=\"\$MD5\" + echo OLDUSIZE=$USIZE + echo OLDSKIP=`expr $SKIP + 1` + exit 0 + ;; + --lsm) +cat << EOLSM +EOF +eval "$LSM_CMD" +cat << EOF >> "$archname" +EOLSM + exit 0 + ;; + --list) + echo Target directory: \$targetdir + offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + for s in \$filesizes + do + MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | UnTAR t + offset=\`expr \$offset + \$s\` + done + exit 0 + ;; + --tar) + offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + arg1="\$2" + if ! shift 2; then MS_Help; exit 1; fi + for s in \$filesizes + do + MS_dd "\$0" \$offset \$s | eval "$GUNZIP_CMD" | tar "\$arg1" - "\$@" + offset=\`expr \$offset + \$s\` + done + exit 0 + ;; + --check) + MS_Check "\$0" y + exit 0 + ;; + --confirm) + verbose=y + shift + ;; + --noexec) + script="" + shift + ;; + --keep) + keep=y + shift + ;; + --target) + keep=y + targetdir=\${2:-.} + if ! shift 2; then MS_Help; exit 1; fi + ;; + --noprogress) + noprogress=y + shift + ;; + --nox11) + nox11=y + shift + ;; + --nochown) + ownership=n + shift + ;; + --nodiskspace) + nodiskspace=y + shift + ;; + --xwin) + if test "$NOWAIT" = n; then + finish="echo Press Return to close this window...; read junk" + fi + xterm_loop=1 + shift + ;; + --phase2) + copy=phase2 + shift + ;; + --) + shift + break ;; + -*) + echo Unrecognized flag : "\$1" >&2 + MS_Help + exit 1 + ;; + *) + break ;; + esac +done + +if test x"\$quiet" = xy -a x"\$verbose" = xy; then + echo Cannot be verbose and quiet at the same time. >&2 + exit 1 +fi + +if test x"$NEED_ROOT" = xy -a \`id -u\` -ne 0; then + echo "Administrative privileges required for this archive (use su or sudo)" >&2 + exit 1 +fi + +if test x"\$copy" \!= xphase2; then + MS_PrintLicense +fi + +case "\$copy" in +copy) + tmpdir=\$TMPROOT/makeself.\$RANDOM.\`date +"%y%m%d%H%M%S"\`.\$\$ + mkdir "\$tmpdir" || { + echo "Could not create temporary directory \$tmpdir" >&2 + exit 1 + } + SCRIPT_COPY="\$tmpdir/makeself" + echo "Copying to a temporary location..." 
>&2 + cp "\$0" "\$SCRIPT_COPY" + chmod +x "\$SCRIPT_COPY" + cd "\$TMPROOT" + exec "\$SCRIPT_COPY" --phase2 -- \$initargs + ;; +phase2) + finish="\$finish ; rm -rf \`dirname \$0\`" + ;; +esac + +if test x"\$nox11" = xn; then + if tty -s; then # Do we have a terminal? + : + else + if test x"\$DISPLAY" != x -a x"\$xterm_loop" = x; then # No, but do we have X? + if xset q > /dev/null 2>&1; then # Check for valid DISPLAY variable + GUESS_XTERMS="xterm gnome-terminal rxvt dtterm eterm Eterm xfce4-terminal lxterminal kvt konsole aterm terminology" + for a in \$GUESS_XTERMS; do + if type \$a >/dev/null 2>&1; then + XTERM=\$a + break + fi + done + chmod a+x \$0 || echo Please add execution rights on \$0 + if test \`echo "\$0" | cut -c1\` = "/"; then # Spawn a terminal! + exec \$XTERM -title "\$label" -e "\$0" --xwin "\$initargs" + else + exec \$XTERM -title "\$label" -e "./\$0" --xwin "\$initargs" + fi + fi + fi + fi +fi + +if test x"\$targetdir" = x.; then + tmpdir="." +else + if test x"\$keep" = xy; then + if test x"\$nooverwrite" = xy && test -d "\$targetdir"; then + echo "Target directory \$targetdir already exists, aborting." >&2 + exit 1 + fi + if test x"\$quiet" = xn; then + echo "Creating directory \$targetdir" >&2 + fi + tmpdir="\$targetdir" + dashp="-p" + else + tmpdir="\$TMPROOT/selfgz\$\$\$RANDOM" + dashp="" + fi + mkdir \$dashp \$tmpdir || { + echo 'Cannot create target directory' \$tmpdir >&2 + echo 'You should try option --target dir' >&2 + eval \$finish + exit 1 + } +fi + +location="\`pwd\`" +if test x"\$SETUP_NOCHECK" != x1; then + MS_Check "\$0" +fi +offset=\`head -n $SKIP "\$0" | wc -c | tr -d " "\` + +if test x"\$verbose" = xy; then + MS_Printf "About to extract $USIZE KB in \$tmpdir ... Proceed ? [Y/n] " + read yn + if test x"\$yn" = xn; then + eval \$finish; exit 1 + fi +fi + +if test x"\$quiet" = xn; then + MS_Printf "Uncompressing \$label" +fi +res=3 +if test x"\$keep" = xn; then + trap 'echo Signal caught, cleaning up >&2; cd \$TMPROOT; /bin/rm -rf \$tmpdir; eval \$finish; exit 15' 1 2 3 15 +fi + +if test x"\$nodiskspace" = xn; then + leftspace=\`MS_diskspace \$tmpdir\` + if test -n "\$leftspace"; then + if test "\$leftspace" -lt $USIZE; then + echo + echo "Not enough space left in "\`dirname \$tmpdir\`" (\$leftspace KB) to decompress \$0 ($USIZE KB)" >&2 + echo "Use --nodiskspace option to skip this check and proceed anyway" >&2 + if test x"\$keep" = xn; then + echo "Consider setting TMPDIR to a directory with more free space." + fi + eval \$finish; exit 1 + fi + fi +fi + +for s in \$filesizes +do + if MS_dd_Progress "\$0" \$offset \$s | eval "$GUNZIP_CMD" | ( cd "\$tmpdir"; umask \$ORIG_UMASK ; UnTAR xp ) 1>/dev/null; then + if test x"\$ownership" = xy; then + (cd "\$tmpdir"; chown -R \`id -u\` .; chgrp -R \`id -g\` .) + fi + else + echo >&2 + echo "Unable to decompress \$0" >&2 + eval \$finish; exit 1 + fi + offset=\`expr \$offset + \$s\` +done +if test x"\$quiet" = xn; then + echo +fi + +cd "\$tmpdir" +res=0 +if test x"\$script" != x; then + if test x"\$export_conf" = x"y"; then + MS_BUNDLE="\$0" + MS_LABEL="\$label" + MS_SCRIPT="\$script" + MS_SCRIPTARGS="\$scriptargs" + MS_ARCHDIRNAME="\$archdirname" + MS_KEEP="\$KEEP" + MS_NOOVERWRITE="\$NOOVERWRITE" + MS_COMPRESS="\$COMPRESS" + export MS_BUNDLE MS_LABEL MS_SCRIPT MS_SCRIPTARGS + export MS_ARCHDIRNAME MS_KEEP MS_NOOVERWRITE MS_COMPRESS + fi + + if test x"\$verbose" = x"y"; then + MS_Printf "OK to execute: \$script \$scriptargs \$* ? 
[Y/n] " + read yn + if test x"\$yn" = x -o x"\$yn" = xy -o x"\$yn" = xY; then + eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$?; + fi + else + eval "\"\$script\" \$scriptargs \"\\\$@\""; res=\$? + fi + if test "\$res" -ne 0; then + test x"\$verbose" = xy && echo "The program '\$script' returned an error code (\$res)" >&2 + fi +fi +if test x"\$keep" = xn; then + cd \$TMPROOT + /bin/rm -rf \$tmpdir +fi +eval \$finish; exit \$res +EOF diff --git a/packaging/makeself/makeself-help-header.txt b/packaging/makeself/makeself-help-header.txt new file mode 100644 index 000000000..bf482c465 --- /dev/null +++ b/packaging/makeself/makeself-help-header.txt @@ -0,0 +1,44 @@ + + ^ + |.-. .-. .-. .-. . netdata + | '-' '-' '-' '-' real-time performance monitoring, done right! + +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> + + (C) Copyright 2017, Costa Tsaousis + All rights reserved + Released under GPL v3+ + + You are about to install netdata to this system. + netdata will be installed at: + + /opt/netdata + + The following changes will be made to your system: + + # USERS / GROUPS + User 'netdata' and group 'netdata' will be added, if not present. + + # LOGROTATE + This file will be installed if logrotate is present. + + - /etc/logrotate.d/netdata + + # SYSTEM INIT + This file will be installed if this system runs with systemd: + + - /lib/systemd/system/netdata.service + + or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: + + - /etc/init.d/netdata will be created + + + This package can also update a netdata installation that has been + created with another version of it. + + Your netdata configuration will be retained. + After installation, netdata will be (re-)started. + + netdata re-distributes a lot of open source software components. + Check its full license at: + https://github.com/netdata/netdata/blob/master/LICENSE.md diff --git a/packaging/makeself/makeself-license.txt b/packaging/makeself/makeself-license.txt new file mode 100644 index 000000000..bf482c465 --- /dev/null +++ b/packaging/makeself/makeself-license.txt @@ -0,0 +1,44 @@ + + ^ + |.-. .-. .-. .-. . netdata + | '-' '-' '-' '-' real-time performance monitoring, done right! + +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> + + (C) Copyright 2017, Costa Tsaousis + All rights reserved + Released under GPL v3+ + + You are about to install netdata to this system. + netdata will be installed at: + + /opt/netdata + + The following changes will be made to your system: + + # USERS / GROUPS + User 'netdata' and group 'netdata' will be added, if not present. + + # LOGROTATE + This file will be installed if logrotate is present. + + - /etc/logrotate.d/netdata + + # SYSTEM INIT + This file will be installed if this system runs with systemd: + + - /lib/systemd/system/netdata.service + + or, for older Centos, Debian/Ubuntu or OpenRC Gentoo: + + - /etc/init.d/netdata will be created + + + This package can also update a netdata installation that has been + created with another version of it. + + Your netdata configuration will be retained. + After installation, netdata will be (re-)started. + + netdata re-distributes a lot of open source software components. 
+ Check its full license at:
+ https://github.com/netdata/netdata/blob/master/LICENSE.md
diff --git a/packaging/makeself/makeself.lsm b/packaging/makeself/makeself.lsm new file mode 100644 index 000000000..6bd4703db --- /dev/null +++ b/packaging/makeself/makeself.lsm @@ -0,0 +1,16 @@
+Begin3
+Title: netdata
+Version: NETDATA_VERSION
+Description: netdata is a system for distributed real-time performance and health monitoring.
+ It provides unparalleled insights, in real-time, of everything happening on the
+ system it runs (including applications such as web and database servers), using
+ modern interactive web dashboards. netdata is fast and efficient, designed to
+ permanently run on all systems (physical & virtual servers, containers, IoT
+ devices), without disrupting their core function.
+Keywords: real-time performance and health monitoring
+Author: Costa Tsaousis (costa@tsaousis.gr)
+Maintained-by: Costa Tsaousis (costa@tsaousis.gr)
+Original-site: https://my-netdata.io/
+Platform: Unix
+Copying-policy: GPL
+End
diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh new file mode 100755 index 000000000..f3cb69976 --- /dev/null +++ b/packaging/makeself/makeself.sh @@ -0,0 +1,621 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Makeself version 2.3.x
+# by Stephane Peter
+#
+# Utility to create self-extracting tar.gz archives.
+# The resulting archive is a file holding the tar.gz archive with
+# a small Shell script stub that uncompresses the archive to a temporary
+# directory and then executes a given script from within that directory.
+#
+# Makeself home page: http://makeself.io/
+#
+# Version 2.0 is a rewrite of version 1.0 to make the code easier to read and maintain.
+#
+# Version history :
+# - 1.0 : Initial public release
+# - 1.1 : The archive can be passed parameters that will be passed on to
+#         the embedded script, thanks to John C. Quillan
+# - 1.2 : Package distribution, bzip2 compression, more command line options,
+#         support for non-temporary archives. Ideas thanks to Francois Petitjean
+# - 1.3 : More patches from Bjarni R. Einarsson and Francois Petitjean:
+#         Support for no compression (--nocomp), script is no longer mandatory,
+#         automatic launch in an xterm, optional verbose output, and -target
+#         archive option to indicate where to extract the files.
+# - 1.4 : Improved UNIX compatibility (Francois Petitjean)
+#         Automatic integrity checking, support of LSM files (Francois Petitjean)
+# - 1.5 : Many bugfixes. Optionally disable xterm spawning.
+# - 1.5.1 : More bugfixes, added archive options -list and -check.
+# - 1.5.2 : Cosmetic changes to inform the user of what's going on with big
+#           archives (Quake III demo)
+# - 1.5.3 : Check for validity of the DISPLAY variable before launching an xterm.
+#           More verbosity in xterms and check for embedded command's return value.
+#           Bugfix for Debian 2.0 systems that have a different "print" command.
+# - 1.5.4 : Many bugfixes. Print out a message if the extraction failed.
+# - 1.5.5 : More bugfixes. Added support for SETUP_NOCHECK environment variable to
+#           bypass checksum verification of archives.
+# - 1.6.0 : Compute MD5 checksums with the md5sum command (patch from Ryan Gordon)
+# - 2.0 : Brand new rewrite, cleaner architecture, separated header and UNIX ports.
+# - 2.0.1 : Added --copy
+# - 2.1.0 : Allow multiple tarballs to be stored in one archive, and incremental updates.
+#           Added --nochown for archives
+#           Stopped doing redundant checksums when not necessary
+# - 2.1.1 : Work around insane behavior from certain Linux distros with no 'uncompress' command
+#           Cleaned up the code to handle error codes from compress. Simplified the extraction code.
+# - 2.1.2 : Some bug fixes. Use head -n to avoid problems.
+# - 2.1.3 : Bug fixes with command line when spawning terminals.
+#           Added --tar for archives, allowing to give arbitrary arguments to tar on the contents of the archive.
+#           Added --noexec to prevent execution of embedded scripts.
+#           Added --nomd5 and --nocrc to avoid creating checksums in archives.
+#           Added command used to create the archive in --info output.
+#           Run the embedded script through eval.
+# - 2.1.4 : Fixed --info output.
+#           Generate random directory name when extracting files to . to avoid problems. (Jason Trent)
+#           Better handling of errors with wrong permissions for the directory containing the files. (Jason Trent)
+#           Avoid some race conditions (Ludwig Nussel)
+#           Unset the $CDPATH variable to avoid problems if it is set. (Debian)
+#           Better handling of dot files in the archive directory.
+# - 2.1.5 : Made the md5sum detection consistent with the header code.
+#           Check for the presence of the archive directory
+#           Added --encrypt for symmetric encryption through gpg (Eric Windisch)
+#           Added support for the digest command on Solaris 10 for MD5 checksums
+#           Check for available disk space before extracting to the target directory (Andreas Schweitzer)
+#           Allow extraction to run asynchronously (patch by Peter Hatch)
+#           Use file descriptors internally to avoid error messages (patch by Kay Tiong Khoo)
+# - 2.1.6 : Replaced one dot per file progress with a realtime progress percentage and a spinning cursor (Guy Baconniere)
+#           Added --noprogress to prevent showing the progress during the decompression (Guy Baconniere)
+#           Added --target dir to allow extracting directly to a target directory (Guy Baconniere)
+# - 2.2.0 : Many bugfixes, updates and contributions from users. Check out the project page on GitHub for the details.
+# - 2.3.0 : Option to specify packaging date to enable byte-for-byte reproducibility. (Marc Pawlowsky)
+#
+# (C) 1998-2017 by Stephane Peter
+#
+# This software is released under the terms of the GNU GPL version 2 and above
+# Please read the license at http://www.gnu.org/copyleft/gpl.html
+#
+
+MS_VERSION=2.3.1
+MS_COMMAND="$0"
+unset CDPATH
+
+for f in "${1+"$@"}"; do
+    MS_COMMAND="$MS_COMMAND \\\\
+    \\\"$f\\\""
+done
+
+# For Solaris systems
+if test -d /usr/xpg4/bin; then
+    PATH=/usr/xpg4/bin:$PATH
+    export PATH
+fi
+
+# Procedures
+
+MS_Usage()
+{
+    echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
+    echo "params can be one or more of the following :"
+    echo " --version | -v : Print out Makeself version number and exit"
+    echo " --help | -h : Print out this help message"
+    echo " --tar-quietly : Suppress verbose output from the tar command"
+    echo " --quiet | -q : Do not print any messages other than errors."
+ echo " --gzip : Compress using gzip (default if detected)" + echo " --pigz : Compress with pigz" + echo " --bzip2 : Compress using bzip2 instead of gzip" + echo " --pbzip2 : Compress using pbzip2 instead of gzip" + echo " --xz : Compress using xz instead of gzip" + echo " --lzo : Compress using lzop instead of gzip" + echo " --lz4 : Compress using lz4 instead of gzip" + echo " --compress : Compress using the UNIX 'compress' command" + echo " --complevel lvl : Compression level for gzip pigz xz lzo lz4 bzip2 and pbzip2 (default 9)" + echo " --base64 : Instead of compressing, encode the data using base64" + echo " --gpg-encrypt : Instead of compressing, encrypt the data using GPG" + echo " --gpg-asymmetric-encrypt-sign" + echo " : Instead of compressing, asymmetrically encrypt and sign the data using GPG" + echo " --gpg-extra opt : Append more options to the gpg command line" + echo " --ssl-encrypt : Instead of compressing, encrypt the data using OpenSSL" + echo " --nocomp : Do not compress the data" + echo " --notemp : The archive will create archive_dir in the" + echo " current directory and uncompress in ./archive_dir" + echo " --needroot : Check that the root user is extracting the archive before proceeding" + echo " --copy : Upon extraction, the archive will first copy itself to" + echo " a temporary directory" + echo " --append : Append more files to an existing Makeself archive" + echo " The label and startup scripts will then be ignored" + echo " --target dir : Extract directly to a target directory" + echo " directory path can be either absolute or relative" + echo " --nooverwrite : Do not extract the archive if the specified target directory exists" + echo " --current : Files will be extracted to the current directory" + echo " Both --current and --target imply --notemp" + echo " --tar-extra opt : Append more options to the tar command line" + echo " --untar-extra opt : Append more options to the during the extraction of the tar archive" + echo " --nomd5 : Don't calculate an MD5 for archive" + echo " --nocrc : Don't calculate a CRC for archive" + echo " --header file : Specify location of the header script" + echo " --follow : Follow the symlinks in the archive" + echo " --noprogress : Do not show the progress during the decompression" + echo " --nox11 : Disable automatic spawn of a xterm" + echo " --nowait : Do not wait for user input after executing embedded" + echo " program from an xterm" + echo " --lsm file : LSM file describing the package" + echo " --license file : Append a license file" + echo " --help-header file : Add a header to the archive's --help output" + echo " --packaging-date date" + echo " : Use provided string as the packaging date" + echo " instead of the current date." + echo + echo " --keep-umask : Keep the umask set to shell default, rather than overriding when executing self-extracting archive." + echo " --export-conf : Export configuration variables to startup_script" + echo + echo "Do not forget to give a fully qualified startup script name" + echo "(i.e. with a ./ prefix if inside the archive)." + exit 1 +} + +# Default settings +if type gzip 2>&1 > /dev/null; then + COMPRESS=gzip +else + COMPRESS=Unix +fi +COMPRESS_LEVEL=9 +KEEP=n +CURRENT=n +NOX11=n +NOWAIT=n +APPEND=n +TAR_QUIETLY=n +KEEP_UMASK=n +QUIET=n +NOPROGRESS=n +COPY=none +NEED_ROOT=n +TAR_ARGS=cvf +TAR_EXTRA="" +GPG_EXTRA="" +DU_ARGS=-ks +HEADER=`dirname "$0"`/makeself-header.sh +TARGETDIR="" +NOOVERWRITE=n +DATE=`LC_ALL=C date` +EXPORT_CONF=n + +# LSM file stuff +LSM_CMD="echo No LSM. 
>> \"\$archname\"" + +while true +do + case "$1" in + --version | -v) + echo Makeself version $MS_VERSION + exit 0 + ;; + --pbzip2) + COMPRESS=pbzip2 + shift + ;; + --bzip2) + COMPRESS=bzip2 + shift + ;; + --gzip) + COMPRESS=gzip + shift + ;; + --pigz) + COMPRESS=pigz + shift + ;; + --xz) + COMPRESS=xz + shift + ;; + --lzo) + COMPRESS=lzo + shift + ;; + --lz4) + COMPRESS=lz4 + shift + ;; + --compress) + COMPRESS=Unix + shift + ;; + --base64) + COMPRESS=base64 + shift + ;; + --gpg-encrypt) + COMPRESS=gpg + shift + ;; + --gpg-asymmetric-encrypt-sign) + COMPRESS=gpg-asymmetric + shift + ;; + --gpg-extra) + GPG_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --ssl-encrypt) + COMPRESS=openssl + shift + ;; + --nocomp) + COMPRESS=none + shift + ;; + --complevel) + COMPRESS_LEVEL="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --notemp) + KEEP=y + shift + ;; + --copy) + COPY=copy + shift + ;; + --current) + CURRENT=y + KEEP=y + shift + ;; + --tar-extra) + TAR_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --untar-extra) + UNTAR_EXTRA="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --target) + TARGETDIR="$2" + KEEP=y + if ! shift 2; then MS_Help; exit 1; fi + ;; + --nooverwrite) + NOOVERWRITE=y + shift + ;; + --needroot) + NEED_ROOT=y + shift + ;; + --header) + HEADER="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --license) + LICENSE=`cat $2` + if ! shift 2; then MS_Help; exit 1; fi + ;; + --follow) + TAR_ARGS=cvhf + DU_ARGS=-ksL + shift + ;; + --noprogress) + NOPROGRESS=y + shift + ;; + --nox11) + NOX11=y + shift + ;; + --nowait) + NOWAIT=y + shift + ;; + --nomd5) + NOMD5=y + shift + ;; + --nocrc) + NOCRC=y + shift + ;; + --append) + APPEND=y + shift + ;; + --lsm) + LSM_CMD="cat \"$2\" >> \"\$archname\"" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --packaging-date) + DATE="$2" + if ! shift 2; then MS_Help; exit 1; fi + ;; + --help-header) + HELPHEADER=`sed -e "s/'/'\\\\\''/g" $2` + if ! shift 2; then MS_Help; exit 1; fi + [ -n "$HELPHEADER" ] && HELPHEADER="$HELPHEADER +" + ;; + --tar-quietly) + TAR_QUIETLY=y + shift + ;; + --keep-umask) + KEEP_UMASK=y + shift + ;; + --export-conf) + EXPORT_CONF=y + shift + ;; + -q | --quiet) + QUIET=y + shift + ;; + -h | --help) + MS_Usage + ;; + -*) + echo Unrecognized flag : "$1" + MS_Usage + ;; + *) + break + ;; + esac +done + +if test $# -lt 1; then + MS_Usage +else + if test -d "$1"; then + archdir="$1" + else + echo "Directory $1 does not exist." >&2 + exit 1 + fi +fi +archname="$2" + +if test "$QUIET" = "y" || test "$TAR_QUIETLY" = "y"; then + if test "$TAR_ARGS" = "cvf"; then + TAR_ARGS="cf" + elif test "$TAR_ARGS" = "cvhf";then + TAR_ARGS="chf" + fi +fi + +if test "$APPEND" = y; then + if test $# -lt 2; then + MS_Usage + fi + + # Gather the info from the original archive + OLDENV=`sh "$archname" --dumpconf` + if test $? -ne 0; then + echo "Unable to update archive: $archname" >&2 + exit 1 + else + eval "$OLDENV" + fi +else + if test "$KEEP" = n -a $# = 3; then + echo "ERROR: Making a temporary archive with no embedded command does not make sense!" >&2 + echo >&2 + MS_Usage + fi + # We don't want to create an absolute directory unless a target directory is defined + if test "$CURRENT" = y; then + archdirname="." 
+ elif test x$TARGETDIR != x; then + archdirname="$TARGETDIR" + else + archdirname=`basename "$1"` + fi + + if test $# -lt 3; then + MS_Usage + fi + + LABEL="$3" + SCRIPT="$4" + test "x$SCRIPT" = x || shift 1 + shift 3 + SCRIPTARGS="$*" +fi + +if test "$KEEP" = n -a "$CURRENT" = y; then + echo "ERROR: It is A VERY DANGEROUS IDEA to try to combine --notemp and --current." >&2 + exit 1 +fi + +case $COMPRESS in +gzip) + GZIP_CMD="gzip -c$COMPRESS_LEVEL" + GUNZIP_CMD="gzip -cd" + ;; +pigz) + GZIP_CMD="pigz -$COMPRESS_LEVEL" + GUNZIP_CMD="gzip -cd" + ;; +pbzip2) + GZIP_CMD="pbzip2 -c$COMPRESS_LEVEL" + GUNZIP_CMD="bzip2 -d" + ;; +bzip2) + GZIP_CMD="bzip2 -$COMPRESS_LEVEL" + GUNZIP_CMD="bzip2 -d" + ;; +xz) + GZIP_CMD="xz -c$COMPRESS_LEVEL" + GUNZIP_CMD="xz -d" + ;; +lzo) + GZIP_CMD="lzop -c$COMPRESS_LEVEL" + GUNZIP_CMD="lzop -d" + ;; +lz4) + GZIP_CMD="lz4 -c$COMPRESS_LEVEL" + GUNZIP_CMD="lz4 -d" + ;; +base64) + GZIP_CMD="base64" + GUNZIP_CMD="base64 -d -i" + ;; +gpg) + GZIP_CMD="gpg $GPG_EXTRA -ac -z$COMPRESS_LEVEL" + GUNZIP_CMD="gpg -d" + ;; +gpg-asymmetric) + GZIP_CMD="gpg $GPG_EXTRA -z$COMPRESS_LEVEL -es" + GUNZIP_CMD="gpg --yes -d" + ;; +openssl) + GZIP_CMD="openssl aes-256-cbc -a -salt -md sha256" + GUNZIP_CMD="openssl aes-256-cbc -d -a -md sha256" + ;; +Unix) + GZIP_CMD="compress -cf" + GUNZIP_CMD="exec 2>&-; uncompress -c || test \\\$? -eq 2 || gzip -cd" + ;; +none) + GZIP_CMD="cat" + GUNZIP_CMD="cat" + ;; +esac + +tmpfile="${TMPDIR:=/tmp}/mkself$$" + +if test -f "$HEADER"; then + oldarchname="$archname" + archname="$tmpfile" + # Generate a fake header to count its lines + SKIP=0 + . "$HEADER" + SKIP=`cat "$tmpfile" |wc -l` + # Get rid of any spaces + SKIP=`expr $SKIP` + rm -f "$tmpfile" + if test "$QUIET" = "n";then + echo Header is $SKIP lines long >&2 + fi + + archname="$oldarchname" +else + echo "Unable to open header file: $HEADER" >&2 + exit 1 +fi + +if test "$QUIET" = "n";then + echo +fi + +if test "$APPEND" = n; then + if test -f "$archname"; then + echo "WARNING: Overwriting existing file: $archname" >&2 + fi +fi + +USIZE=`du $DU_ARGS "$archdir" | awk '{print $1}'` + +if test "." = "$archdirname"; then + if test "$KEEP" = n; then + archdirname="makeself-$$-`date +%Y%m%d%H%M%S`" + fi +fi + +test -d "$archdir" || { echo "Error: $archdir does not exist."; rm -f "$tmpfile"; exit 1; } +if test "$QUIET" = "n";then + echo About to compress $USIZE KB of data... + echo Adding files to archive named \"$archname\"... +fi +exec 3<> "$tmpfile" +( cd "$archdir" && ( tar $TAR_EXTRA -$TAR_ARGS - . 
| eval "$GZIP_CMD" >&3 ) ) || \ + { echo Aborting: archive directory not found or temporary file: "$tmpfile" could not be created.; exec 3>&-; rm -f "$tmpfile"; exit 1; } +exec 3>&- # try to close the archive + +fsize=`cat "$tmpfile" | wc -c | tr -d " "` + +# Compute the checksums + +md5sum=00000000000000000000000000000000 +crcsum=0000000000 + +if test "$NOCRC" = y; then + if test "$QUIET" = "n";then + echo "skipping crc at user request" + fi +else + crcsum=`cat "$tmpfile" | CMD_ENV=xpg4 cksum | sed -e 's/ /Z/' -e 's/ /Z/' | cut -dZ -f1` + if test "$QUIET" = "n";then + echo "CRC: $crcsum" + fi +fi + +if test "$NOMD5" = y; then + if test "$QUIET" = "n";then + echo "skipping md5sum at user request" + fi +else + # Try to locate a MD5 binary + OLD_PATH=$PATH + PATH=${GUESS_MD5_PATH:-"$OLD_PATH:/bin:/usr/bin:/sbin:/usr/local/ssl/bin:/usr/local/bin:/opt/openssl/bin"} + MD5_ARG="" + MD5_PATH=`exec <&- 2>&-; which md5sum || command -v md5sum || type md5sum` + test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which md5 || command -v md5 || type md5` + test -x "$MD5_PATH" || MD5_PATH=`exec <&- 2>&-; which digest || command -v digest || type digest` + PATH=$OLD_PATH + if test -x "$MD5_PATH"; then + if test `basename ${MD5_PATH}`x = digestx; then + MD5_ARG="-a md5" + fi + md5sum=`cat "$tmpfile" | eval "$MD5_PATH $MD5_ARG" | cut -b-32`; + if test "$QUIET" = "n";then + echo "MD5: $md5sum" + fi + else + if test "$QUIET" = "n";then + echo "MD5: none, MD5 command not found" + fi + fi +fi + +if test "$APPEND" = y; then + mv "$archname" "$archname".bak || exit + + # Prepare entry for new archive + filesizes="$filesizes $fsize" + CRCsum="$CRCsum $crcsum" + MD5sum="$MD5sum $md5sum" + USIZE=`expr $USIZE + $OLDUSIZE` + # Generate the header + . "$HEADER" + # Append the original data + tail -n +$OLDSKIP "$archname".bak >> "$archname" + # Append the new data + cat "$tmpfile" >> "$archname" + + chmod +x "$archname" + rm -f "$archname".bak + if test "$QUIET" = "n";then + echo Self-extractable archive \"$archname\" successfully updated. + fi +else + filesizes="$fsize" + CRCsum="$crcsum" + MD5sum="$md5sum" + + # Generate the header + . "$HEADER" + + # Append the compressed tar data after the stub + if test "$QUIET" = "n";then + echo + fi + cat "$tmpfile" >> "$archname" + chmod +x "$archname" + if test "$QUIET" = "n";then + echo Self-extractable archive \"$archname\" successfully created. + fi +fi +rm -f "$tmpfile" diff --git a/packaging/makeself/post-installer.sh b/packaging/makeself/post-installer.sh new file mode 100755 index 000000000..38cc41ef7 --- /dev/null +++ b/packaging/makeself/post-installer.sh @@ -0,0 +1,11 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-3.0-or-later + +# This script is started using the shell of the system +# and executes our 'install-or-update.sh' script +# using the netdata supplied, statically linked BASH +# +# so, at 'install-or-update.sh' we are always sure +# we run under BASH v4. 
+ +./bin/bash system/install-or-update.sh "${@}" diff --git a/packaging/makeself/run-all-jobs.sh b/packaging/makeself/run-all-jobs.sh new file mode 100755 index 000000000..f7507c2d2 --- /dev/null +++ b/packaging/makeself/run-all-jobs.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +LC_ALL=C +umask 002 + +# be nice +renice 19 $$ >/dev/null 2>/dev/null + +# ----------------------------------------------------------------------------- +# prepare the environment for the jobs + +# installation directory +export NETDATA_INSTALL_PATH="${1-/opt/netdata}" + +# our source directory +export NETDATA_MAKESELF_PATH="$(dirname "${0}")" +if [ "${NETDATA_MAKESELF_PATH:0:1}" != "/" ] + then + export NETDATA_MAKESELF_PATH="$(pwd)/${NETDATA_MAKESELF_PATH}" +fi + +# netdata source directory +export NETDATA_SOURCE_PATH="${NETDATA_MAKESELF_PATH}/../.." + +# make sure ${NULL} is empty +export NULL= + +# ----------------------------------------------------------------------------- + +cd "${NETDATA_MAKESELF_PATH}" || exit 1 + +. ./functions.sh "${@}" || exit 1 + +for x in jobs/*.install.sh +do + progress "running ${x}" + "${x}" "${NETDATA_INSTALL_PATH}" +done + +echo >&2 "All jobs for static packaging done successfully." +exit 0 diff --git a/packaging/version b/packaging/version new file mode 100644 index 000000000..a5effa303 --- /dev/null +++ b/packaging/version @@ -0,0 +1 @@ +v1.12.0 diff --git a/registry/Makefile.in b/registry/Makefile.in deleted file mode 100644 index fded2d81f..000000000 --- a/registry/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = registry -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = 
@CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ 
-psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu registry/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu registry/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/registry/README.md b/registry/README.md index de24b3018..5a9a2b3bb 100644 --- a/registry/README.md +++ b/registry/README.md @@ -1,4 +1,4 @@ -# Netdata registry +# Registry Netdata registry implements the `my-netdata` menu on netdata dashboards. The `my-netdata` menu lists the netdata servers you have visited. @@ -36,23 +36,23 @@ The registry keeps track of 3 entities: For each netdata installation (each `machine_guid`) the registry keeps track of the different URLs it is accessed. -1. **persons**: i.e. 
the web browsers accessing the netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)
+2. **persons**: i.e. the web browsers accessing the netdata installations (a random GUID generated by the registry the first time it sees a new web browser; we call this **person_guid**)

For each person, the registry keeps track of the netdata installations it has accessed and their URLs.

-1. **URLs** of netdata installations (as seen by the web browsers)
+3. **URLs** of netdata installations (as seen by the web browsers)

For each URL, the registry keeps the URL and nothing more. Each URL is linked to *persons* and *machines*. The only way to find a URL is to know its **machine_guid** or have a **person_guid** it is linked to.

## Who talks to the registry?

-Your web browser **only**! Check here if this is against your policies: [how to not send any information to a thirdparty server](../doc/netdata-security.md#netdata-security)
+Your web browser **only**! If sending this information is against your policies, you can [run your own registry](#run-your-own-registry).

Your netdata servers do not talk to the registry. This is a UML diagram of its operation:

![registry](https://cloud.githubusercontent.com/assets/2662304/19448565/11a70632-94ab-11e6-9d80-f410b4acb797.png)

-## What data the registry maintains?
+## What data does the registry store?

Its database contains:
@@ -72,9 +72,9 @@ Yeap! The registry can handle 50.000 - 100.000 requests **per second per core**

We believe it can do it...

-## Every netdata can be a registry
+## Run your own registry

-Yes, you read correct, **every netdata can be a registry**. Just pick one and configure it.
+**Every netdata can be a registry**. Just pick one and configure it.

**To turn any netdata into a registry**, edit `/etc/netdata/netdata.conf` and set:
@@ -150,3 +150,5 @@ ERROR 409: Cannot ACCESS netdata registry: https://registry.my-netdata.io respon
```
This error is printed on your web browser console (press F12 on your browser to see it).
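To make the "Run your own registry" steps above concrete, here is a minimal sketch of the `[registry]` section in `/etc/netdata/netdata.conf` (the URL is a placeholder; announce your own registry host):

```
[registry]
    enabled = yes
    # placeholder URL - replace with your own registry host
    registry to announce = http://registry.example.com:19999
```

All other netdata servers should keep `enabled = no` and point `registry to announce` at the same URL, so their dashboards register with your registry instead of the public one.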
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fregistry%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/registry/registry.c b/registry/registry.c index 4f97eb58f..aaa448c51 100644 --- a/registry/registry.c +++ b/registry/registry.c @@ -81,6 +81,8 @@ static int registry_json_person_url_callback(void *entry, void *data) { struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data; struct web_client *w = c->w; + if (!strcmp(pu->url->url,"***")) return 0; + if(unlikely(c->count++)) buffer_strcat(w->response.data, ","); @@ -97,6 +99,8 @@ static int registry_json_machine_url_callback(void *entry, void *data) { struct web_client *w = c->w; REGISTRY_MACHINE *m = c->m; + if (!strcmp(mu->url->url,"***")) return 1; + if(unlikely(c->count++)) buffer_strcat(w->response.data, ","); @@ -131,8 +135,10 @@ static inline int registry_person_url_callback_verify_machine_exists(void *entry int registry_request_hello_json(RRDHOST *host, struct web_client *w) { registry_json_header(host, w, "hello", REGISTRY_STATUS_OK); - buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"", - registry.registry_to_announce); + buffer_sprintf(w->response.data, + ",\n\t\"registry\": \"%s\",\n\t\"cloud_base_url\": \"%s\",\n\t\"anonymous_statistics\": %s", + registry.registry_to_announce, + registry.cloud_base_url, netdata_anonymous_statistics_enabled?"true":"false"); registry_json_footer(w); return 200; @@ -332,7 +338,7 @@ void registry_statistics(void) { , "registry" , NULL , "NetData Registry Sessions" - , "session" + , "sessions" , "registry" , "stats" , 131000 @@ -390,7 +396,7 @@ void registry_statistics(void) { , "registry" , NULL , "NetData Registry Memory" - , "KB" + , "KiB" , "registry" , "stats" , 131300 diff --git a/registry/registry.h b/registry/registry.h index ab36de014..ca74300e0 100644 --- a/registry/registry.h +++ b/registry/registry.h @@ -72,6 +72,7 @@ extern int registry_request_hello_json(RRDHOST *host, struct web_client *w); extern void registry_statistics(void); extern char *registry_get_this_machine_guid(void); +extern char *registry_get_mgmt_api_key(void); extern char *registry_get_this_machine_hostname(void); extern int regenerate_guid(const char *guid, char *result); diff --git a/registry/registry_init.c b/registry/registry_init.c index d3e0420d2..3cf140dee 100644 --- a/registry/registry_init.c +++ b/registry/registry_init.c @@ -40,6 +40,9 @@ int registry_init(void) { registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname); registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1); + // netdata.cloud configuration, if cloud_base_url == "", cloud functionality is disabled. 
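+    // A sketch of how these settings surface in the registry "hello" reply
+    // (field names from registry_request_hello_json() above; values depend
+    // on the configuration):
+    //   { ..., "registry": "https://registry.my-netdata.io",
+    //     "cloud_base_url": "https://netdata.cloud",
+    //     "anonymous_statistics": false }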
+ registry.cloud_base_url = config_get(CONFIG_SECTION_CLOUD, "cloud base url", "https://netdata.cloud"); + setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1); setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1); diff --git a/registry/registry_internals.h b/registry/registry_internals.h index baa2dc09d..c126e454b 100644 --- a/registry/registry_internals.h +++ b/registry/registry_internals.h @@ -37,6 +37,7 @@ struct registry { char *registry_domain; char *hostname; char *registry_to_announce; + char *cloud_base_url; time_t persons_expiration; // seconds to expire idle persons int verify_cookies_redirects; diff --git a/registry/registry_person.c b/registry/registry_person.c index 53e3f47f4..268b0bd13 100644 --- a/registry/registry_person.c +++ b/registry/registry_person.c @@ -79,7 +79,7 @@ REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_M REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu); if(tpu != pu) { error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid); - free(pu); + freez(pu); pu = tpu; } else diff --git a/registry/registry_url.c b/registry/registry_url.c index 6a7106458..9ac3ce10c 100644 --- a/registry/registry_url.c +++ b/registry/registry_url.c @@ -51,7 +51,7 @@ REGISTRY_URL *registry_url_get(const char *url, size_t urllen) { n = registry_url_index_add(u); if(n != u) { error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url); - free(u); + freez(u); u = n; } else diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 502183108..000000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -mkdocs>=1.0.1 -mkdocs-material - diff --git a/runtime.txt b/runtime.txt deleted file mode 100644 index d70c8f8d8..000000000 --- a/runtime.txt +++ /dev/null @@ -1 +0,0 @@ -3.6 diff --git a/streaming/Makefile.in b/streaming/Makefile.in deleted file mode 100644 index c3a5fdc74..000000000 --- a/streaming/Makefile.in +++ /dev/null @@ -1,521 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = streaming -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_libconfig_DATA) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed 
-e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(libconfigdir)" -DATA = $(dist_libconfig_DATA) $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = 
@abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_libconfig_DATA = \ - stream.conf \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu streaming/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu streaming/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_libconfigDATA: $(dist_libconfig_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \ - done - -uninstall-dist_libconfigDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(libconfigdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_libconfigDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_libconfigDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_libconfigDATA install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-dist_libconfigDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/streaming/README.md b/streaming/README.md index e455d1070..e1ebad536 100644 --- a/streaming/README.md +++ b/streaming/README.md @@ -1,4 +1,4 @@ -# Metrics streaming +# Streaming and replication Each netdata is able to replicate/mirror its database to another netdata, by streaming collected metrics, in real-time to it. 
This is quite different from [data archiving to third party time-series @@ -18,13 +18,13 @@ a netdata performs: Local netdata (`slave`), **without any database or alarms**, collects metrics and sends them to another netdata (`master`). -The user can take the full functionality of the `slave` netdata at -http://master.ip:19999/host/slave.hostname/. Alarms for the `slave` are served by the `master`. +The `my-netdata` menu shows a list of all "databases streamed to" the master. Clicking one of those links allows the user to view the full dashboard of the `slave` netdata. The URL has the form http://master-host:master-port/host/slave-host/. -In this mode the `slave` is just a plain data collector. -It runs with... **5MB** of RAM (yes, you read correct), spawns all external plugins, but instead +Alarms for the `slave` are served by the `master`. + +In this mode the `slave` is just a plain data collector. It spawns all external plugins, but instead of maintaining a local database and accepting dashboard requests, it streams all metrics to the -`master`. +`master`. The memory footprint is reduced significantly, to between 6 MiB and 40 MiB, depending on the enabled plugins. To reduce the memory usage as much as possible, refer to [running netdata in embedded devices](../docs/Performance.md#running-netdata-in-embedded-devices). The same `master` can collect data for any number of `slaves`. @@ -33,8 +33,8 @@ The same `master` can collect data for any number of `slaves`. Local netdata (`slave`), **with a local database (and possibly alarms)**, collects metrics and sends them to another netdata (`master`). -The user can use all the functions **at both** http://slave.ip:19999/ and -http://master.ip:19999/host/slave.hostname/. +The user can use all the functions **at both** http://slave-ip:slave-port/ and +http://master-host:master-port/host/slave-host/. The `slave` and the `master` may have different data retention policies for the same metrics. @@ -81,12 +81,15 @@ monitoring (there cannot be health monitoring without a database). ``` [web] - mode = none | static-threaded | single-threaded | multi-threaded + mode = none | static-threaded + accept a streaming request every seconds = 0 ``` `[web].mode = none` disables the API (netdata will not listen to any ports). This also disables the registry (there cannot be a registry without an API). +`accept a streaming request every seconds` limits how often a master Netdata server will accept new streaming requests from its slaves. A value of 0 sets no limit; 1 allows at most one new streaming request per second. If a limit is set, you may see error log entries of the form "... too busy to accept new streaming request. Will be allowed in X secs". + ``` [backend] enabled = yes | no @@ -410,3 +413,5 @@ The sending side of a netdata proxy, connects and disconnects to the final desti metrics, following the same pattern as the receiving side. For a practical example see [Monitoring ephemeral nodes](#monitoring-ephemeral-nodes).
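+For the sending side described above, a minimal `stream.conf` sketch (the hostname and API key shown are placeholders, not values from this repository):
+
+```
+[stream]
+    enabled = yes
+    destination = master-host:19999
+    api key = 11111111-2222-3333-4444-555555555555
+```
+
+The receiving `master` must list the same API key in its own `stream.conf` for the connection to be accepted.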
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fstreaming%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c index df1a2177f..617484c80 100644 --- a/streaming/rrdpush.c +++ b/streaming/rrdpush.c @@ -1114,7 +1114,7 @@ int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url char buf[GUID_LEN + 1]; while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if(!value || !*value) continue; char *name = mystrsep(&value, "="); diff --git a/system/Makefile.in b/system/Makefile.in deleted file mode 100644 index 3598ebde5..000000000 --- a/system/Makefile.in +++ /dev/null @@ -1,588 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(dist_config_SCRIPTS) \ - $(dist_noinst_DATA) -subdir = system 
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(configdir)" -SCRIPTS = $(dist_config_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) $(nodist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ 
-am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev -# SPDX-License-Identifier: GPL-3.0-or-later -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - edit-config \ - netdata-openrc \ - netdata.logrotate \ - netdata.service \ - netdata-init-d \ - netdata-lsb \ - netdata-freebsd \ - netdata.plist \ - $(NULL) - -SUFFIXES = .in -dist_config_SCRIPTS = \ - edit-config \ - $(NULL) - -nodist_noinst_DATA = \ - netdata-openrc \ - netdata.logrotate \ - netdata.service \ - netdata-init-d \ - netdata-lsb \ - netdata-freebsd \ - netdata.plist \ - $(NULL) - -dist_noinst_DATA = \ - edit-config.in \ - netdata-openrc.in \ - netdata.logrotate.in \ - netdata.service.in \ - netdata-init-d.in \ - netdata-lsb.in \ - netdata-freebsd.in \ - netdata.plist.in \ - netdata.conf \ - $(NULL) - -all: all-am - -.SUFFIXES: -.SUFFIXES: .in -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu system/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu system/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; -$(top_srcdir)/build/subst.inc: - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_configSCRIPTS: $(dist_config_SCRIPTS) - @$(NORMAL_INSTALL) - @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(configdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(configdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ - done | \ - sed -e 'p;s,.*/,,;n' \ - -e 'h;s|.*|.|' \ - -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ - $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ - { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ - if ($$2 == $$4) { files[d] = files[d] " " $$1; \ - if (++n[d] == $(am__install_max)) { \ - print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ - else { print "f", d "/" $$4, $$1 } } \ - END { for (d in files) print "f", d, files[d] }' | \ - while read type dir files; do \ - if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ - test -z "$$files" || { \ - echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(configdir)$$dir'"; \ - $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(configdir)$$dir" || exit $$?; \ - } \ - ; done - -uninstall-dist_configSCRIPTS: - @$(NORMAL_UNINSTALL) - @list='$(dist_config_SCRIPTS)'; test -n "$(configdir)" || exit 0; \ - files=`for p in $$list; do echo "$$p"; done | \ - sed -e 's,.*/,,;$(transform)'`; \ - dir='$(DESTDIR)$(configdir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: - for dir in "$(DESTDIR)$(configdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_configSCRIPTS - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_configSCRIPTS - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_configSCRIPTS install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic pdf pdf-am ps ps-am tags-am uninstall \ - uninstall-am uninstall-dist_configSCRIPTS - -.in: - if sed \ - -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \ - -e 's#[@]sbindir_POST@#$(sbindir)#g' \ - -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \ - -e 's#[@]pythondir_POST@#$(pythondir)#g' \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - -e 's#[@]cachedir_POST@#$(cachedir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -# Tell versions 
[3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/system/edit-config b/system/edit-config deleted file mode 100644 index e7f50e767..000000000 --- a/system/edit-config +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env sh - -[ -f /etc/profile ] && . /etc/profile - -file="${1}" - -if [ "$(command -v editor)" ] ; then - EDITOR="${EDITOR-editor}" -else - EDITOR="${EDITOR-vi}" -fi - -[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="/usr/local/etc/netdata" -[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="/usr/local/lib/netdata/conf.d" - -if [ -z "${file}" ] -then - cat <&2 -R *.conf */*.conf - exit 1 - -fi - -file_is_in_path() { - local file path real - file="${1}" - path="${2}" - - real="$(readlink -f "${file}")" - - # we don't have working readlink - [ -z "${real}" ] && return 0 - - if [ ! -z "${real}" ] && [ -z "$(echo "${real}" | grep -E "^${path}.*$")" ] - then - echo >&2 "File '${file}' is physically at '${real}', which is not in '${path}'. Aborting." - exit 1 - fi - - return 0 -} - -edit() { - echo >&2 "Editing '${1}' ..." - - # check we can edit - file_is_in_path "${1}" "${NETDATA_USER_CONFIG_DIR}" || exit 1 - - "${EDITOR}" "${1}" - exit $? -} - -copy_and_edit() { - # check we can copy - file_is_in_path "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_STOCK_CONFIG_DIR}" || exit 1 - - if [ ! -f "${NETDATA_USER_CONFIG_DIR}/${1}" ] - then - echo >&2 "Copying '${NETDATA_STOCK_CONFIG_DIR}/${1}' to '${NETDATA_USER_CONFIG_DIR}/${1}' ... " - cp -p "${NETDATA_STOCK_CONFIG_DIR}/${1}" "${NETDATA_USER_CONFIG_DIR}/${1}" || exit 1 - fi - - edit "${NETDATA_USER_CONFIG_DIR}/${1}" -} - -# make sure it is not absolute filename -c1="$(echo "${file}" | cut -b 1)" -if [ "${c1}" = "/" ] || [ "${c1}" = "." 
] -then - echo >&2 "Please don't use filenames starting with '/' or '.'" - exit 1 -fi - -# already exists -if [ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] -then - edit "${NETDATA_USER_CONFIG_DIR}/${file}" -fi - -[ -f "${NETDATA_USER_CONFIG_DIR}/${file}" ] && edit "${NETDATA_USER_CONFIG_DIR}/${file}" -[ -f "${NETDATA_STOCK_CONFIG_DIR}/${file}" ] && copy_and_edit "${file}" - -echo >&2 "File '${file}' is not found in '${NETDATA_STOCK_CONFIG_DIR}'" -exit 1 diff --git a/system/netdata-freebsd.in b/system/netdata-freebsd.in index 300ddc9ec..233535bfe 100644 --- a/system/netdata-freebsd.in +++ b/system/netdata-freebsd.in @@ -12,7 +12,7 @@ pidfile="${pidfile}/netdata.pid" command="@sbindir_POST@/netdata" command_args="-P ${pidfile}" -required_files="@sysconfdir_POST@/netdata/netdata.conf" +required_files="@configdir_POST@/netdata.conf" start_precmd="netdata_prestart" stop_postcmd="netdata_poststop" diff --git a/tests/Makefile.am b/tests/Makefile.am index 722266d77..b0f65456e 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,7 +1,15 @@ # SPDX-License-Identifier: GPL-3.0-or-later +AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + health_mgmtapi/health-cmdapi-test.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + dist_noinst_DATA = \ README.md \ web/lib/jasmine-jquery.js \ @@ -13,8 +21,14 @@ dist_noinst_DATA = \ node.d/fronius.parse.spec.js \ node.d/fronius.process.spec.js \ node.d/fronius.validation.spec.js \ + health_mgmtapi/health-cmdapi-test.sh.in \ + $(NULL) + +dist_plugins_SCRIPTS = \ + health_mgmtapi/health-cmdapi-test.sh \ $(NULL) dist_noinst_SCRIPTS = \ stress.sh \ $(NULL) + diff --git a/tests/Makefile.in b/tests/Makefile.in deleted file mode 100644 index c9109dbdd..000000000 --- a/tests/Makefile.in +++ /dev/null @@ -1,478 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = tests -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_SCRIPTS) $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -SCRIPTS = $(dist_noinst_SCRIPTS) -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ 
-CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ 
-program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - web/lib/jasmine-jquery.js \ - web/easypiechart.chart.spec.js \ - web/easypiechart.percentage.spec.js \ - web/karma.conf.js \ - web/fixtures/easypiechart.chart.fixture1.html \ - node.d/fronius.chart.spec.js \ - node.d/fronius.parse.spec.js \ - node.d/fronius.process.spec.js \ - node.d/fronius.validation.spec.js \ - $(NULL) - -dist_noinst_SCRIPTS = \ - stress.sh \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tests/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu tests/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(SCRIPTS) $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/tests/README.md b/tests/README.md index 3dd8859a4..4ac3f2105 100644 --- a/tests/README.md +++ b/tests/README.md @@ -134,3 +134,6 @@ Apparently, jasmine-node can produce a junit report with the `--junitreport` fla The karma and node.d runners can be integrated into Travis (AFAIK), but that is outside my ability. Note: Karma is for browser-testing. On a build server, no GUI or browser might be available, unless browsers support headless mode.
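+As a sketch, headless invocations could look like this (assuming `karma-cli`, a headless Chrome launcher, and `jasmine-node` are installed; the paths match this repository):
+
+```
+# browser specs, headless; ChromeHeadless is an assumption about the installed launcher
+karma start tests/web/karma.conf.js --single-run --browsers ChromeHeadless
+
+# node.d specs, with a junit report for the CI server
+jasmine-node --junitreport tests/node.d/
+```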
+ + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/tests/health_mgmtapi/README.md b/tests/health_mgmtapi/README.md new file mode 100644 index 000000000..278c72dc1 --- /dev/null +++ b/tests/health_mgmtapi/README.md @@ -0,0 +1,13 @@ +# Health command API tester + +The directory `tests/health_mgmtapi` contains the test script `health-cmdapi-test.sh` for the [health command API](../../web/api/health). + +The script can be executed with options to prepare the system for the tests, run them, and restore the system to its previous state. + +It depends on the management API being accessible and on `api/v1/alarms?all` returning functional responses. + +Run it with `tests/health_mgmtapi/health-cmdapi-test.sh -h` to see the options. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Ftests%2Fhealth_mgmtapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() + + diff --git a/tests/health_mgmtapi/health-cmdapi-test.sh.in b/tests/health_mgmtapi/health-cmdapi-test.sh.in new file mode 100755 index 000000000..5e218b11e --- /dev/null +++ b/tests/health_mgmtapi/health-cmdapi-test.sh.in @@ -0,0 +1,263 @@ +#!/usr/bin/env bash + +NETDATA_USER_CONFIG_DIR="@configdir_POST@" +NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@" +NETDATA_VARLIB_DIR="@varlibdir_POST@" + +printhelp () { + echo "Usage: health-cmdapi-test.sh [OPTIONS] + -s SETUP config files for python example tests + -c CLEANUP config files from python example tests + -r RESTART netdata after SETUP and CLEANUP, using systemctl restart netdata. + -t TEST scenarios execution + -u changes the host:port from localhost:19999 to + " +} + +check () { + echo -e "${GRAY}Check: '${1}' in 2 sec" + sleep 2 + resp=$(curl -s "http://$URL/api/v1/alarms?all") + r=$(echo "${resp}" | \ + python3 -c "import sys, json; d=json.load(sys.stdin); \ + print(\ + d['alarms']['example.random.example_alarm1']['disabled'], \ + d['alarms']['example.random.example_alarm1']['silenced'] , \ + d['alarms']['example.random.example_alarm2']['disabled'], \ + d['alarms']['example.random.example_alarm2']['silenced'], \ + d['alarms']['system.load.load_trigger']['disabled'], \ + d['alarms']['system.load.load_trigger']['silenced'], \ + );" 2>&1) + if [ $? -ne 0 ] ; then + echo -e "${RED}ERROR: Unexpected response '$resp'" + err=$((err+1)) + elif [ "${r}" != "${2}" ] ; then + echo -e "${RED}ERROR: 'Got ${r}'.
Expected '${2}'" + err=$((err+1)) + else + echo -e "${GREEN}Success" + fi +} + +cmd () { + echo -e "${WHITE}Cmd '${1}', expecting '${2}'" + RESPONSE=$(curl -s "http://$URL/api/v1/manage/health?${1}" -H "X-Auth-Token: $TOKEN" 2>&1) + if [ "${RESPONSE}" != "${2}" ] ; then + echo -e "${RED}ERROR: Response '${RESPONSE}' != '${2}'" + err=$((err+1)) + else + echo -e "${GREEN}Success" + fi +} + +WHITE='\033[0;37m' +RED='\033[0;31m' +GREEN='\033[0;32m' +GRAY='\033[0;37m' + +SETUP=0 +RESTART=0 +CLEANUP=0 +TEST=0 +URL="localhost:19999" + +while getopts :srctu: option +do + case "$option" in + s) + SETUP=1 + ;; + r) + RESTART=1 + ;; + c) + CLEANUP=1 + ;; + t) + TEST=1 + ;; + u) + URL=$OPTARG + ;; + *) + printhelp + exit 1 + ;; + esac +done + +if [ $SETUP -eq 1 ] ; then + echo "Preparing netdata configuration for testing" + # Prep netdata for tests + if [ -f "${NETDATA_USER_CONFIG_DIR}/python.d.conf" ] ; then + cp -f "${NETDATA_USER_CONFIG_DIR}/python.d.conf" /tmp/python.d.conf + else + cp "${NETDATA_STOCK_CONFIG_DIR}/python.d.conf" "${NETDATA_USER_CONFIG_DIR}/" + fi + sed -i -e "s/example: no/example: yes/g" "${NETDATA_USER_CONFIG_DIR}/python.d.conf" + + mypath=$(cd ${0%/*} && echo $PWD) + + cp -f "${mypath}/python-example.conf" "${NETDATA_USER_CONFIG_DIR}/health.d/" + + # netdata.conf + if [ -f "${NETDATA_USER_CONFIG_DIR}/netdata.conf" ] ; then + cp -f "${NETDATA_USER_CONFIG_DIR}/netdata.conf" /tmp/netdata.conf + fi + printf "[health]\nrun at least every seconds = 1\n" > "${NETDATA_USER_CONFIG_DIR}/netdata.conf" + + chmod +r "${NETDATA_USER_CONFIG_DIR}/python.d.conf" "${NETDATA_USER_CONFIG_DIR}/netdata.conf" "${NETDATA_USER_CONFIG_DIR}/health.d/python-example.conf" "${NETDATA_STOCK_CONFIG_DIR}/health.d/load.conf" + # Restart netdata + if [ $RESTART -eq 1 ] ; then + echo "Restarting netdata" + systemctl restart netdata + fi +fi + +err=0 + +# Execute tests +if [ $TEST -eq 1 ] ; then + + HEALTH_CMDAPI_MSG_AUTHERROR="Auth Error" + HEALTH_CMDAPI_MSG_SILENCEALL="All alarm notifications are silenced" + HEALTH_CMDAPI_MSG_DISABLEALL="All health checks are disabled" + HEALTH_CMDAPI_MSG_RESET="All health checks and notifications are enabled" + HEALTH_CMDAPI_MSG_DISABLE="Health checks disabled for alarms matching the selectors" + HEALTH_CMDAPI_MSG_SILENCE="Alarm notifications silenced for alarms matching the selectors" + HEALTH_CMDAPI_MSG_ADDED="Alarm selector added" + HEALTH_CMDAPI_MSG_INVALID_KEY="Invalid key. Ignoring it." + HEALTH_CMDAPI_MSG_STYPEWARNING="WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." + HEALTH_CMDAPI_MSG_NOSELECTORWARNING="WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." 
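+  # NOTE: for manual experiments outside this script, the same management API
+  # can be driven directly with curl. The host and key path below are assumptions
+  # for a default install (this script uses $URL and ${NETDATA_VARLIB_DIR} instead):
+  #   TOKEN="$(cat /var/lib/netdata/netdata.api.key)"
+  #   curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: ${TOKEN}"
+  #   curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: ${TOKEN}"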
+ + if [ -f "${NETDATA_VARLIB_DIR}/netdata.api.key" ] ;then + read -r CORRECT_TOKEN < "${NETDATA_VARLIB_DIR}/netdata.api.key" + else + echo "${NETDATA_VARLIB_DIR}/netdata.api.key not found" + exit 1 + fi + # Set correct token + TOKEN="${CORRECT_TOKEN}" + + # Test default state + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + check "Default State" "False False False False False False" + + # Test auth failure + TOKEN="Wrong token" + cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_AUTHERROR" + check "Default State" "False False False False False False" + + # Set correct token + TOKEN="${CORRECT_TOKEN}" + + # Test disable + cmd "cmd=DISABLE ALL" "$HEALTH_CMDAPI_MSG_DISABLEALL" + check "All disabled" "True False True False True False" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + check "Default State" "False False False False False False" + + # Test silence + cmd "cmd=SILENCE ALL" "$HEALTH_CMDAPI_MSG_SILENCEALL" + check "All silenced" "False True False True False True" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + check "Default State" "False False False False False False" + + # Add silencer by name + printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED" + cmd "cmd=SILENCE&alarm=*example_alarm1 *load_trigger" "${resp}" + check "Silence notifications for alarm1 and load_trigger" "False True False False False True" + + # Convert to disable health checks + cmd "cmd=DISABLE" "$HEALTH_CMDAPI_MSG_DISABLE" + check "Disable notifications for alarm1 and load_trigger" "True False False False True False" + + # Convert back to silence notifications + cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE" + check "Silence notifications for alarm1 and load_trigger" "False True False False False True" + + # Add second silencer by name + cmd "alarm=*example_alarm2" "$HEALTH_CMDAPI_MSG_ADDED" + check "Silence notifications for alarm1,alarm2 and load_trigger" "False True False True False True" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + + # Add silencer by chart + printf -v resp "$HEALTH_CMDAPI_MSG_DISABLE\n$HEALTH_CMDAPI_MSG_ADDED" + cmd "cmd=DISABLE&chart=system.load" "${resp}" + check "Default State" "False False False False True False" + + # Add silencer by context + cmd "context=random" "$HEALTH_CMDAPI_MSG_ADDED" + check "Default State" "True False True False True False" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + + # Add second condition to a selector (AND) + printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_ADDED" + cmd "cmd=SILENCE&alarm=*example_alarm1 *load_trigger&chart=system.load" "${resp}" + check "Silence notifications load_trigger" "False False False False False True" + + # Add second selector with two conditions + cmd "alarm=*example_alarm1 *load_trigger&context=random" "$HEALTH_CMDAPI_MSG_ADDED" + check "Silence notifications load_trigger" "False True False False False True" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + + # Add silencer without a command to disable or silence alarms + printf -v resp "$HEALTH_CMDAPI_MSG_ADDED\n$HEALTH_CMDAPI_MSG_STYPEWARNING" + cmd "families=load" "${resp}" + check "Family selector with no command" "False False False False False False" + + # Add silence command + cmd "cmd=SILENCE" "$HEALTH_CMDAPI_MSG_SILENCE" + check "Silence family load" "False False False False False True" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + + # Add command without silencers + printf -v resp "$HEALTH_CMDAPI_MSG_SILENCE\n$HEALTH_CMDAPI_MSG_NOSELECTORWARNING" + cmd "cmd=SILENCE" "${resp}" + 
check "Command with no selector" "False False False False False False" + + # Add hosts silencer + cmd "hosts=*" "$HEALTH_CMDAPI_MSG_ADDED" + check "Silence all hosts" "False True False True False True" + + # Reset + cmd "cmd=RESET" "$HEALTH_CMDAPI_MSG_RESET" + +fi + +# Cleanup +if [ $CLEANUP -eq 1 ] ; then + echo -e "${WHITE}Restoring netdata configuration" + for f in "python.d.conf" "netdata.conf" ; do + if [ -f "/tmp/$f" ] ; then + mv -f "/tmp/$f" "${NETDATA_USER_CONFIG_DIR}/" + else + rm -f "${NETDATA_USER_CONFIG_DIR}/$f" + fi + done + + rm -f "${NETDATA_USER_CONFIG_DIR}/health.d/python-example.conf" + + # Restart netdata + if [ $RESTART -eq 1 ] ; then + echo "Restarting netdata" + systemctl restart netdata + fi +fi + +if [ $err -gt 0 ] ; then + echo "$err error(s) found" + exit 1 +fi \ No newline at end of file diff --git a/tests/health_mgmtapi/python-example.conf b/tests/health_mgmtapi/python-example.conf new file mode 100644 index 000000000..66713208c --- /dev/null +++ b/tests/health_mgmtapi/python-example.conf @@ -0,0 +1,16 @@ +alarm: example_alarm1 + on: example.random + every: 2s + warn: $random1 > (($status >= $WARNING) ? (55) : (75)) + crit: $random1 > (($status == $CRITICAL) ? (75) : (95)) + info: random + to: sysadmin + +alarm: example_alarm2 + on: example.random + every: 2s + warn: $random2 > (($status >= $WARNING) ? (55) : (75)) + crit: $random2 > (($status == $CRITICAL) ? (75) : (95)) + info: random + to: sysadmin + diff --git a/tests/lifecycle.bats b/tests/lifecycle.bats new file mode 100755 index 000000000..8efdf4478 --- /dev/null +++ b/tests/lifecycle.bats @@ -0,0 +1,27 @@ +#!/usr/bin/env bats + +INSTALLATION="$BATS_TMPDIR/installation" +ENV="${INSTALLATION}/netdata/etc/netdata/.environment" + +setup() { + if [ ! -f .gitignore ]; then + echo "Run as ./tests/lifecycle/$(basename "$0") from top level directory of git repository" + exit 1 + fi +} + +@test "install netdata" { + ./netdata-installer.sh --dont-wait --dont-start-it --auto-update --install "${INSTALLATION}" +} + +@test "update netdata" { + export ENVIRONMENT_FILE="${ENV}" + /etc/cron.daily/netdata-updater + ! grep "new_installation" "${ENV}" +} + +@test "uninstall netdata" { + ./packaging/installer/netdata-uninstaller.sh --yes --force --env "${ENV}" + [ ! -f "${INSTALLATION}/netdata/usr/sbin/netdata" ] + [ ! 
-f "/etc/cron.daily/netdata-updater" ] +} diff --git a/tests/profile/Makefile b/tests/profile/Makefile new file mode 100644 index 000000000..5f4e8b521 --- /dev/null +++ b/tests/profile/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +COMMON_CFLAGS = -I ../../ -DTARGET_OS=1 -Wall -Wextra +PROFILE_CFLAGS = -O1 -ggdb $(COMMON_CFLAGS) +PERFORMANCE_CFLAGS = -O2 $(COMMON_CFLAGS) + +CFLAGS = $(PERFORMANCE_CFLAGS) + +LIBNETDATA_FILES = \ + ../../libnetdata/popen/popen.o \ + ../../libnetdata/storage_number/storage_number.o \ + ../../libnetdata/avl/avl.o \ + ../../libnetdata/socket/socket.o \ + ../../libnetdata/os.o \ + ../../libnetdata/clocks/clocks.o \ + ../../libnetdata/procfile/procfile.o \ + ../../libnetdata/statistical/statistical.o \ + ../../libnetdata/eval/eval.o \ + ../../libnetdata/threads/threads.o \ + ../../libnetdata/dictionary/dictionary.o \ + ../../libnetdata/simple_pattern/simple_pattern.o \ + ../../libnetdata/url/url.o \ + ../../libnetdata/config/appconfig.o \ + ../../libnetdata/libnetdata.o \ + ../../libnetdata/buffer/buffer.o \ + ../../libnetdata/adaptive_resortable_list/adaptive_resortable_list.o \ + ../../libnetdata/locks/locks.o \ + ../../libnetdata/log/log.o \ + $(NULL) + +COMMON_LDFLAGS = $(LIBNETDATA_FILES) -pthread -lm + +all: statsd-stress benchmark-procfile-parser test-eval benchmark-dictionary benchmark-value-pairs + +benchmark-procfile-parser: benchmark-procfile-parser.c + gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS} + +benchmark-dictionary: benchmark-dictionary.c + gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS} + +benchmark-value-pairs: benchmark-value-pairs.c + gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS} + +statsd-stress: statsd-stress.c + gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS} + +test-eval: test-eval.c + gcc ${CFLAGS} -o $@ $^ ${COMMON_LDFLAGS} + + +clean: + rm -f benchmark-procfile-parser statsd-stress test-eval benchmark-dictionary benchmark-value-pairs + diff --git a/tests/profile/benchmark-dictionary.c b/tests/profile/benchmark-dictionary.c new file mode 100644 index 000000000..30c098d5d --- /dev/null +++ b/tests/profile/benchmark-dictionary.c @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * 1. build netdata (as normally) + * 2. cd tests/profile/ + * 3. 
compile with: + * gcc -O3 -Wall -Wextra -I ../../src/ -I ../../ -o benchmark-dictionary benchmark-dictionary.c ../../src/dictionary.o ../../src/log.o ../../src/avl.o ../../src/common.o -pthread + * + */ + +#include "config.h" +#include "libnetdata/libnetdata.h" + +struct myvalue { + int i; +}; + +void netdata_cleanup_and_exit(int ret) { exit(ret); } + +int main(int argc, char **argv) { + if(argc || argv) {;} + +// DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_WITH_STATISTICS); + DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS); + if(!dict) fatal("Cannot create dictionary."); + + struct rusage start, end; + unsigned long long dt; + char buf[100 + 1]; + struct myvalue value, *v; + int i, max = 30000000, max2; + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); + dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Inserting %d entries in the dictionary\n", max); + for(i = 0; i < max; i++) { + value.i = i; + snprintf(buf, 100, "%d", i); + + dictionary_set(dict, buf, &value, sizeof(struct myvalue)); + } + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Added %d entries in %llu microseconds: %llu inserts per second\n", max, dt, max * 1000000ULL / dt); + fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches); + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); + dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Retrieving %d entries from the dictionary\n", max); + for(i = 0; i < max; i++) { + value.i = i; + snprintf(buf, 100, "%d", i); + + v = dictionary_get(dict, buf); + if(!v) + fprintf(stderr, "ERROR: cannot get value %d from the dictionary\n", i); + else if(v->i != i) + fprintf(stderr, "ERROR: expected %d but got %d\n", i, v->i); + } + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Read %d entries in %llu microseconds: %llu searches per second\n", max, dt, max * 1000000ULL / dt); + fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches); + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); + dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Resetting %d entries in the dictionary\n", max); + for(i = 0; i < max; i++) { + value.i = i; + snprintf(buf, 100, "%d", i); + + dictionary_set(dict, buf, &value, sizeof(struct myvalue)); + } + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Reset %d entries in %llu microseconds: %llu resets per second\n", max, dt, max * 1000000ULL / dt); + fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches); + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); +
dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Searching %d non-existing entries in the dictionary\n", max); + max2 = max * 2; + for(i = max; i < max2; i++) { + value.i = i; + snprintf(buf, 100, "%d", i); + + v = dictionary_get(dict, buf); + if(v) + fprintf(stderr, "ERROR: got non-existing value %d from the dictionary\n", i); + } + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Searched %d non-existing entries in %llu microseconds: %llu not found searches per second\n", max, dt, max * 1000000ULL / dt); + fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches); + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); + dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Deleting %d entries from the dictionary\n", max); + for(i = 0; i < max; i++) { + value.i = i; + snprintf(buf, 100, "%d", i); + + dictionary_del(dict, buf); + } + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Deleted %d entries in %llu microseconds: %llu deletes per second\n", max, dt, max * 1000000ULL / dt); + fprintf(stderr, " > Dictionary: %llu inserts, %llu deletes, %llu searches\n\n", dict->stats->inserts, dict->stats->deletes, dict->stats->searches); + + // ------------------------------------------------------------------------ + + getrusage(RUSAGE_SELF, &start); + dict->stats->inserts = dict->stats->deletes = dict->stats->searches = 0ULL; + fprintf(stderr, "Destroying dictionary\n"); + dictionary_destroy(dict); + getrusage(RUSAGE_SELF, &end); + dt = (end.ru_utime.tv_sec * 1000000ULL + end.ru_utime.tv_usec) - (start.ru_utime.tv_sec * 1000000ULL + start.ru_utime.tv_usec); + fprintf(stderr, "Destroyed in %llu microseconds\n", dt); + + return 0; +} diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c new file mode 100644 index 000000000..c07d1d857 --- /dev/null +++ b/tests/profile/benchmark-line-parsing.c @@ -0,0 +1,707 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <sys/time.h> +#include <sys/resource.h> + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) + +#define simple_hash(name) ({ \ + register unsigned char *__hash_source = (unsigned char *)(name); \ + register uint32_t __hash_value = 0x811c9dc5; \ + while (*__hash_source) { \ + __hash_value *= 16777619; \ + __hash_value ^= (uint32_t) *__hash_source++; \ + } \ + __hash_value; \ +}) + +static inline uint32_t simple_hash2(const char *name) { + register unsigned char *s = (unsigned char *)name; + register uint32_t hval = 0x811c9dc5; + while (*s) { + hval *= 16777619; + hval ^= (uint32_t) *s++; + } + return hval; +} + +static inline unsigned long long fast_strtoull(const char *s) { + register unsigned long long n = 0; + register char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + // n = (n << 1) + (n << 3) + (c - '0'); + } + return n; +} + +static uint32_t cache_hash = 0; +static uint32_t rss_hash = 0; +static uint32_t rss_huge_hash = 0; +static uint32_t mapped_file_hash = 0; +static uint32_t writeback_hash = 0; +static
uint32_t dirty_hash = 0; +static uint32_t swap_hash = 0; +static uint32_t pgpgin_hash = 0; +static uint32_t pgpgout_hash = 0; +static uint32_t pgfault_hash = 0; +static uint32_t pgmajfault_hash = 0; +static uint32_t inactive_anon_hash = 0; +static uint32_t active_anon_hash = 0; +static uint32_t inactive_file_hash = 0; +static uint32_t active_file_hash = 0; +static uint32_t unevictable_hash = 0; +static uint32_t hierarchical_memory_limit_hash = 0; +static uint32_t total_cache_hash = 0; +static uint32_t total_rss_hash = 0; +static uint32_t total_rss_huge_hash = 0; +static uint32_t total_mapped_file_hash = 0; +static uint32_t total_writeback_hash = 0; +static uint32_t total_dirty_hash = 0; +static uint32_t total_swap_hash = 0; +static uint32_t total_pgpgin_hash = 0; +static uint32_t total_pgpgout_hash = 0; +static uint32_t total_pgfault_hash = 0; +static uint32_t total_pgmajfault_hash = 0; +static uint32_t total_inactive_anon_hash = 0; +static uint32_t total_active_anon_hash = 0; +static uint32_t total_inactive_file_hash = 0; +static uint32_t total_active_file_hash = 0; +static uint32_t total_unevictable_hash = 0; + +char *strings[] = { + "cache", + "rss", + "rss_huge", + "mapped_file", + "writeback", + "dirty", + "swap", + "pgpgin", + "pgpgout", + "pgfault", + "pgmajfault", + "inactive_anon", + "active_anon", + "inactive_file", + "active_file", + "unevictable", + "hierarchical_memory_limit", + "total_cache", + "total_rss", + "total_rss_huge", + "total_mapped_file", + "total_writeback", + "total_dirty", + "total_swap", + "total_pgpgin", + "total_pgpgout", + "total_pgfault", + "total_pgmajfault", + "total_inactive_anon", + "total_active_anon", + "total_inactive_file", + "total_active_file", + "total_unevictable", + NULL +}; + +unsigned long long values1[12] = { 0 }; +unsigned long long values2[12] = { 0 }; +unsigned long long values3[12] = { 0 }; +unsigned long long values4[12] = { 0 }; +unsigned long long values5[12] = { 0 }; +unsigned long long values6[12] = { 0 }; + +#define NUMBER1 "12345678901234" +#define NUMBER2 "23456789012345" +#define NUMBER3 "34567890123456" +#define NUMBER4 "45678901234567" +#define NUMBER5 "56789012345678" +#define NUMBER6 "67890123456789" +#define NUMBER7 "78901234567890" +#define NUMBER8 "89012345678901" +#define NUMBER9 "90123456789012" +#define NUMBER10 "12345678901234" +#define NUMBER11 "23456789012345" + +// simple system strcmp() +void test1() { + int i; + for(i = 0; strings[i] ; i++) { + char *s = strings[i]; + + if(unlikely(!strcmp(s, "cache"))) + values1[i] = strtoull(NUMBER1, NULL, 10); + + else if(unlikely(!strcmp(s, "rss"))) + values1[i] = strtoull(NUMBER2, NULL, 10); + + else if(unlikely(!strcmp(s, "rss_huge"))) + values1[i] = strtoull(NUMBER3, NULL, 10); + + else if(unlikely(!strcmp(s, "mapped_file"))) + values1[i] = strtoull(NUMBER4, NULL, 10); + + else if(unlikely(!strcmp(s, "writeback"))) + values1[i] = strtoull(NUMBER5, NULL, 10); + + else if(unlikely(!strcmp(s, "dirty"))) + values1[i] = strtoull(NUMBER6, NULL, 10); + + else if(unlikely(!strcmp(s, "swap"))) + values1[i] = strtoull(NUMBER7, NULL, 10); + + else if(unlikely(!strcmp(s, "pgpgin"))) + values1[i] = strtoull(NUMBER8, NULL, 10); + + else if(unlikely(!strcmp(s, "pgpgout"))) + values1[i] = strtoull(NUMBER9, NULL, 10); + + else if(unlikely(!strcmp(s, "pgfault"))) + values1[i] = strtoull(NUMBER10, NULL, 10); + + else if(unlikely(!strcmp(s, "pgmajfault"))) + values1[i] = strtoull(NUMBER11, NULL, 10); + } +} + +// inline simple_hash() with system strtoull() +void test2() { + int i; + for(i = 
0; strings[i] ; i++) { + char *s = strings[i]; + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values2[i] = strtoull(NUMBER1, NULL, 10); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values2[i] = strtoull(NUMBER2, NULL, 10); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values2[i] = strtoull(NUMBER3, NULL, 10); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) + values2[i] = strtoull(NUMBER4, NULL, 10); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values2[i] = strtoull(NUMBER5, NULL, 10); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values2[i] = strtoull(NUMBER6, NULL, 10); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values2[i] = strtoull(NUMBER7, NULL, 10); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values2[i] = strtoull(NUMBER8, NULL, 10); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values2[i] = strtoull(NUMBER9, NULL, 10); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values2[i] = strtoull(NUMBER10, NULL, 10); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + values2[i] = strtoull(NUMBER11, NULL, 10); + } +} + +// statement expression simple_hash(), system strtoull() +void test3() { + int i; + for(i = 0; strings[i] ; i++) { + char *s = strings[i]; + uint32_t hash = simple_hash(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values3[i] = strtoull(NUMBER1, NULL, 10); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values3[i] = strtoull(NUMBER2, NULL, 10); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values3[i] = strtoull(NUMBER3, NULL, 10); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) + values3[i] = strtoull(NUMBER4, NULL, 10); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values3[i] = strtoull(NUMBER5, NULL, 10); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values3[i] = strtoull(NUMBER6, NULL, 10); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values3[i] = strtoull(NUMBER7, NULL, 10); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values3[i] = strtoull(NUMBER8, NULL, 10); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values3[i] = strtoull(NUMBER9, NULL, 10); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values3[i] = strtoull(NUMBER10, NULL, 10); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + values3[i] = strtoull(NUMBER11, NULL, 10); + } +} + + +// inline simple_hash(), if-continue checks +void test4() { + int i; + for(i = 0; strings[i] ; i++) { + char *s = strings[i]; + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) { + values4[i] = strtoull(NUMBER1, NULL, 0); + continue; + } + + if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) { + values4[i] = strtoull(NUMBER2, NULL, 0); + continue; + } + + if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) { + values4[i] = strtoull(NUMBER3, NULL, 0); + continue; + } + + if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) { + values4[i] = strtoull(NUMBER4, NULL, 0); + continue; + } + + if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) { + values4[i] = strtoull(NUMBER5, NULL, 0); + continue; + } + + 
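// the remaining keys repeat the same hash-gate-then-strcmp pattern; each hit + // continues the loop immediately instead of falling through the gates below + 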
if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) { + values4[i] = strtoull(NUMBER6, NULL, 0); + continue; + } + + if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) { + values4[i] = strtoull(NUMBER7, NULL, 0); + continue; + } + + if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) { + values4[i] = strtoull(NUMBER8, NULL, 0); + continue; + } + + if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) { + values4[i] = strtoull(NUMBER9, NULL, 0); + continue; + } + + if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) { + values4[i] = strtoull(NUMBER10, NULL, 0); + continue; + } + + if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) { + values4[i] = strtoull(NUMBER11, NULL, 0); + continue; + } + } +} + +// inline simple_hash(), if-else-if-else-if (netdata default) +void test5() { + int i; + for(i = 0; strings[i] ; i++) { + char *s = strings[i]; + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values5[i] = fast_strtoull(NUMBER1); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values5[i] = fast_strtoull(NUMBER2); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values5[i] = fast_strtoull(NUMBER3); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) + values5[i] = fast_strtoull(NUMBER4); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values5[i] = fast_strtoull(NUMBER5); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values5[i] = fast_strtoull(NUMBER6); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values5[i] = fast_strtoull(NUMBER7); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values5[i] = fast_strtoull(NUMBER8); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values5[i] = fast_strtoull(NUMBER9); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values5[i] = fast_strtoull(NUMBER10); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + values5[i] = fast_strtoull(NUMBER11); + } +} + +// ---------------------------------------------------------------------------- + +struct entry { + char *name; + uint32_t hash; + int found; + void (*func)(void *data1, void *data2); + void *data1; + void *data2; + struct entry *prev, *next; +}; + +struct base { + int iteration; + int registered; + int wanted; + int found; + struct entry *entries, *last; +}; + +static inline void callback(void *data1, void *data2) { + char *string = data1; + unsigned long long *value = data2; + *value = fast_strtoull(string); +} + +static inline void callback_system_strtoull(void *data1, void *data2) { + char *string = data1; + unsigned long long *value = data2; + *value = strtoull(string, NULL, 10); +} + + +static inline struct base *entry(struct base *base, const char *name, void *data1, void *data2, void (*func)(void *, void *)) { + if(!base) + base = calloc(1, sizeof(struct base)); + + struct entry *e = malloc(sizeof(struct entry)); + e->name = strdup(name); + e->hash = simple_hash2(e->name); + e->data1 = data1; + e->data2 = data2; + e->func = func; + e->prev = NULL; + e->next = base->entries; + + if(base->entries) base->entries->prev = e; + else base->last = e; + + base->entries = e; + base->registered++; + base->wanted = base->registered; + + return base; +} + +static inline int check(struct base *base, const char *s) { + uint32_t hash = simple_hash2(s); + + if(likely(!strcmp(s, base->last->name))) { + 
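// fast path: the incoming key is exactly the entry this list expects next, + // so it is consumed without any scan; a miss falls through to the relink logic + 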
base->last->found = 1; + base->found++; + if(base->last->func) base->last->func(base->last->data1, base->last->data2); + base->last = base->last->next; + + if(!base->last) + base->last = base->entries; + + if(base->found == base->registered) + return 1; + + return 0; + } + + // find it + struct entry *e; + for(e = base->entries; e ; e = e->next) + if(e->hash == hash && !strcmp(e->name, s)) + break; + + if(e == base->last) { + printf("ERROR\n"); + exit(1); + } + + if(e) { + // found + + // run it + if(e->func) e->func(e->data1, e->data2); + + // unlink it + if(e->next) e->next->prev = e->prev; + if(e->prev) e->prev->next = e->next; + + if(base->entries == e) + base->entries = e->next; + } + else { + // not found + + // create it + e = calloc(1, sizeof(struct entry)); + e->name = strdup(s); + e->hash = hash; + } + + // link it here + e->next = base->last; + if(base->last) { + e->prev = base->last->prev; + base->last->prev = e; + + if(base->entries == base->last) + base->entries = e; + } + else + e->prev = NULL; + + if(e->prev) + e->prev->next = e; + + base->last = e->next; + if(!base->last) + base->last = base->entries; + + e->found = 1; + base->found++; + + if(base->found == base->registered) + return 1; + + printf("relinked '%s' after '%s' and before '%s': ", e->name, e->prev?e->prev->name:"NONE", e->next?e->next->name:"NONE"); + for(e = base->entries; e ; e = e->next) printf("%s ", e->name); + printf("\n"); + + return 0; +} + +static inline void begin(struct base *base) { + + if(unlikely((base->iteration % 60) == 1)) { + base->wanted = 0; + struct entry *e; + for(e = base->entries; e ; e = e->next) + if(e->found) base->wanted++; + } + + base->iteration++; + base->last = base->entries; + base->found = 0; +} + +void test6() { + + static struct base *base = NULL; + + if(unlikely(!base)) { + base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull); + base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull); + base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull); + base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull); + base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull); + base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull); + base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull); + base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull); + base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull); + base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull); + base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull); + } + + begin(base); + + int i; + for(i = 0; strings[i] ; i++) { + if(check(base, strings[i])) + break; + } +} + +void test7() { + + static struct base *base = NULL; + + if(unlikely(!base)) { + base = entry(base, "cache", NUMBER1, &values6[0], callback); + base = entry(base, "rss", NUMBER2, &values6[1], callback); + base = entry(base, "rss_huge", NUMBER3, &values6[2], callback); + base = entry(base, "mapped_file", NUMBER4, &values6[3], callback); + base = entry(base, "writeback", NUMBER5, &values6[4], callback); + base = entry(base, "dirty", NUMBER6, &values6[5], callback); + base = entry(base, "swap", NUMBER7, &values6[6], callback); + base = entry(base, "pgpgin", NUMBER8, &values6[7], callback); + base = entry(base, "pgpgout", NUMBER9, &values6[8], callback); + base = entry(base, "pgfault", NUMBER10, &values6[9], callback); + base = 
entry(base, "pgmajfault", NUMBER11, &values6[10], callback); + } + + begin(base); + + int i; + for(i = 0; strings[i] ; i++) { + if(check(base, strings[i])) + break; + } +} + +// ---------------------------------------------------------------------------- + + +// ============== +// --- Poor man cycle counting. +static unsigned long tsc; + +static void begin_tsc(void) +{ + unsigned long a, d; + asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx"); + tsc = ((unsigned long)d << 32) | (unsigned long)a; +} + +static unsigned long end_tsc(void) +{ + unsigned long a, d; + asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx"); + return (((unsigned long)d << 32) | (unsigned long)a) - tsc; +} +// =============== + +static unsigned long long clk; + +static void begin_clock() { + struct timeval tv; + if(unlikely(gettimeofday(&tv, NULL) == -1)) + return; + clk = tv.tv_sec * 1000000 + tv.tv_usec; +} + +static unsigned long long end_clock() { + struct timeval tv; + if(unlikely(gettimeofday(&tv, NULL) == -1)) + return -1; + return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk; +} + +void main(void) +{ + cache_hash = simple_hash("cache"); + rss_hash = simple_hash("rss"); + rss_huge_hash = simple_hash("rss_huge"); + mapped_file_hash = simple_hash("mapped_file"); + writeback_hash = simple_hash("writeback"); + dirty_hash = simple_hash("dirty"); + swap_hash = simple_hash("swap"); + pgpgin_hash = simple_hash("pgpgin"); + pgpgout_hash = simple_hash("pgpgout"); + pgfault_hash = simple_hash("pgfault"); + pgmajfault_hash = simple_hash("pgmajfault"); + inactive_anon_hash = simple_hash("inactive_anon"); + active_anon_hash = simple_hash("active_anon"); + inactive_file_hash = simple_hash("inactive_file"); + active_file_hash = simple_hash("active_file"); + unevictable_hash = simple_hash("unevictable"); + hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit"); + total_cache_hash = simple_hash("total_cache"); + total_rss_hash = simple_hash("total_rss"); + total_rss_huge_hash = simple_hash("total_rss_huge"); + total_mapped_file_hash = simple_hash("total_mapped_file"); + total_writeback_hash = simple_hash("total_writeback"); + total_dirty_hash = simple_hash("total_dirty"); + total_swap_hash = simple_hash("total_swap"); + total_pgpgin_hash = simple_hash("total_pgpgin"); + total_pgpgout_hash = simple_hash("total_pgpgout"); + total_pgfault_hash = simple_hash("total_pgfault"); + total_pgmajfault_hash = simple_hash("total_pgmajfault"); + total_inactive_anon_hash = simple_hash("total_inactive_anon"); + total_active_anon_hash = simple_hash("total_active_anon"); + total_inactive_file_hash = simple_hash("total_inactive_file"); + total_active_file_hash = simple_hash("total_active_file"); + total_unevictable_hash = simple_hash("total_unevictable"); + + // cache functions + (void)simple_hash2("hello world"); + (void)strcmp("1", "2"); + (void)strtoull("123", NULL, 0); + + unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7; + unsigned long max = 1000000; + + // let the processor get up to speed + begin_clock(); + for(i = 0; i <= max ;i++) test1(); + c1 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test1(); + c1 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test2(); + c2 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test3(); + c3 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test4(); + c4 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test5(); + c5 = end_clock(); + + begin_clock(); + for(i = 0; i <= max 
;i++) test6(); + c6 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test7(); + c7 = end_clock(); + + for(i = 0; i < 11 ; i++) + printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]); + + printf("\n\nRESULTS\n"); + printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n" + "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n" + "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n" + "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n" + "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n" + "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n" + "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n" + , c1 + , c2 + , c3 + , c4 + , c5 + , c6 + , c7 + ); + + return 0; +} diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c new file mode 100644 index 000000000..991e2dfc8 --- /dev/null +++ b/tests/profile/benchmark-procfile-parser.c @@ -0,0 +1,329 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ + +#include "config.h" +#include "libnetdata/libnetdata.h" + +void netdata_cleanup_and_exit(int ret) { + exit(ret); +} + +#define PF_PREFIX "PROCFILE" +#define PFWORDS_INCREASE_STEP 200 +#define PFLINES_INCREASE_STEP 10 +#define PROCFILE_INCREMENT_BUFFER 512 +extern size_t procfile_max_lines; +extern size_t procfile_max_words; +extern size_t procfile_max_allocation; + + +static inline void pflines_reset(pflines *fl) { + // debug(D_PROCFILE, PF_PREFIX ": resetting lines"); + + fl->len = 0; +} + +static inline void pflines_free(pflines *fl) { + // debug(D_PROCFILE, PF_PREFIX ": freeing lines"); + + freez(fl); +} + +static inline void pfwords_reset(pfwords *fw) { + // debug(D_PROCFILE, PF_PREFIX ": resetting words"); + fw->len = 0; +} + + +static inline void pfwords_add(procfile *ff, char *str) { + // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str); + + pfwords *fw = ff->words; + if(unlikely(fw->len == fw->size)) { + // debug(D_PROCFILE, PF_PREFIX ": expanding words"); + + ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *)); + fw->size += PFWORDS_INCREASE_STEP; + } + + fw->words[fw->len++] = str; +} + +NEVERNULL +static inline size_t *pflines_add(procfile *ff) { + // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word); + + pflines *fl = ff->lines; + if(unlikely(fl->len == fl->size)) { + // debug(D_PROCFILE, PF_PREFIX ": expanding lines"); + + ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline)); + fl->size += PFLINES_INCREASE_STEP; + } + + ffline *ffl = &fl->lines[fl->len++]; + ffl->words = 0; + ffl->first = ff->words->len; + + return &ffl->words; +} + + +NOINLINE +static void procfile_parser(procfile *ff) { + // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename); + + char *s = ff->data // our current position + , *e = &ff->data[ff->len] // the terminating null + , *t = ff->data; // the first character of a word (or quoted / parenthesized string) + + // the lookup array to find our type of character + PF_CHAR_TYPE *separators = ff->separators; + + char quote = 0; // the quote character - only when in quoted string + size_t opened = 0; // counts the number of open parenthesis + + 
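// open the first line up-front; line_words points at its word counter so the + // scanner below can bump it whenever a word terminates + 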
size_t *line_words = pflines_add(ff); + + while(s < e) { + PF_CHAR_TYPE ct = separators[(unsigned char)(*s)]; + + // this is faster than a switch() + // read more here: http://lazarenko.me/switch/ + switch(ct) { + case PF_CHAR_IS_SEPARATOR: + if(!quote && !opened) { + if (s != t) { + // separator, but we have word before it + *s = '\0'; + pfwords_add(ff, t); + (*line_words)++; + } + t = s + 1; + } + // fallthrough + + case PF_CHAR_IS_WORD: + s++; + break; + + + case PF_CHAR_IS_NEWLINE: + // end of line + + *s = '\0'; + pfwords_add(ff, t); + (*line_words)++; + t = ++s; + + // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words); + + line_words = pflines_add(ff); + break; + + case PF_CHAR_IS_QUOTE: + if(unlikely(!quote && s == t)) { + // quote opened at the beginning + quote = *s; + t = ++s; + } + else if(unlikely(quote && quote == *s)) { + // quote closed + quote = 0; + + *s = '\0'; + pfwords_add(ff, t); + (*line_words)++; + t = ++s; + } + else + s++; + break; + + case PF_CHAR_IS_OPEN: + if(s == t) { + opened++; + t = ++s; + } + else if(opened) { + opened++; + s++; + } + else + s++; + break; + + case PF_CHAR_IS_CLOSE: + if(opened) { + opened--; + + if(!opened) { + *s = '\0'; + pfwords_add(ff, t); + (*line_words)++; + t = ++s; + } + else + s++; + } + else + s++; + break; + + default: + fatal("Internal Error: procfile_readall() does not handle all the cases."); + } + } + + if(likely(s > t && t < e)) { + // the last word + if(unlikely(ff->len >= ff->size)) { + // we are going to lose the last byte + s = &ff->data[ff->size - 1]; + } + + *s = '\0'; + pfwords_add(ff, t); + (*line_words)++; + // t = ++s; + } +} + + +procfile *procfile_readall1(procfile *ff) { + // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename); + + ff->len = 0; // zero the used size + ssize_t r = 1; // read at least once + while(r > 0) { + ssize_t s = ff->len; + ssize_t x = ff->size - s; + + if(unlikely(!x)) { + debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff)); + ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER); + ff->size += PROCFILE_INCREMENT_BUFFER; + } + + debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s)); + r = read(ff->fd, &ff->data[s], ff->size - s); + if(unlikely(r == -1)) { + if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); + procfile_close(ff); + return NULL; + } + + ff->len += r; + } + + // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename); + if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) { + if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); + procfile_close(ff); + return NULL; + } + + pflines_reset(ff->lines); + pfwords_reset(ff->words); + procfile_parser(ff); + + if(unlikely(procfile_adaptive_initial_allocation)) { + if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len; + if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len; + if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len; + } + + // debug(D_PROCFILE, "File '%s' updated.", ff->filename); + return ff; +} + + + + + + + + +// ============== +// --- Poor man cycle counting. 
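+// cpuid serializes the pipeline before rdtsc, and rdtscp completes only after
+// earlier instructions retire, so the pair brackets the measured region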
+static unsigned long tsc; + +void begin_tsc(void) +{ + unsigned long a, d; + asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx"); + tsc = ((unsigned long)d << 32) | (unsigned long)a; +} + +unsigned long end_tsc(void) +{ + unsigned long a, d; + asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx"); + return (((unsigned long)d << 32) | (unsigned long)a) - tsc; +} +// ============== + + +unsigned long test_netdata_internal(void) { + static procfile *ff = NULL; + + ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(!ff) { + fprintf(stderr, "Failed to open filename\n"); + exit(1); + } + + begin_tsc(); + ff = procfile_readall(ff); + unsigned long c = end_tsc(); + + if(!ff) { + fprintf(stderr, "Failed to read filename\n"); + exit(1); + } + + return c; +} + +unsigned long test_method1(void) { + static procfile *ff = NULL; + + ff = procfile_reopen(ff, "/proc/self/status", " \t:,-()/", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO); + if(!ff) { + fprintf(stderr, "Failed to open filename\n"); + exit(1); + } + + begin_tsc(); + ff = procfile_readall1(ff); + unsigned long c = end_tsc(); + + if(!ff) { + fprintf(stderr, "Failed to read filename\n"); + exit(1); + } + + return c; +} + +//--- Test +int main(int argc, char **argv) +{ + (void)argc; (void)argv; + + int i, max = 1000000; + + unsigned long c1 = 0; + test_netdata_internal(); + for(i = 0; i < max ; i++) + c1 += test_netdata_internal(); + + unsigned long c2 = 0; + test_method1(); + for(i = 0; i < max ; i++) + c2 += test_method1(); + + printf("netdata internal: completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c1, c1 / max, (float)c1 * 100.0 / (float)c1); + printf("method1 : completed in %lu cycles, %lu cycles per read, %0.2f %%.\n", c2, c2 / max, (float)c2 * 100.0 / (float)c1); + + return 0; +} diff --git a/tests/profile/benchmark-registry.c b/tests/profile/benchmark-registry.c new file mode 100644 index 000000000..cfed6d7c8 --- /dev/null +++ b/tests/profile/benchmark-registry.c @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ + +/* + * compile with + * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o benchmark-registry benchmark-registry.c ../src/dictionary.o ../src/log.o ../src/avl.o ../src/common.o ../src/appconfig.o ../src/web_buffer.o ../src/storage_number.o ../src/rrd.o ../src/health.o -pthread -luuid -lm -DHAVE_CONFIG_H -DVARLIB_DIR="\"/tmp\"" + */ + +char *hostname = "me"; + +#include "../src/registry.c" + +void netdata_cleanup_and_exit(int ret) { exit(ret); } + +// ---------------------------------------------------------------------------- +// TESTS + +int test1(int argc, char **argv) { + + void print_stats(uint32_t requests, unsigned long long start, unsigned long long end) { + fprintf(stderr, " > SPEED: %u requests served in %0.2f seconds ( >>> %llu per second <<< )\n", + requests, (end-start) / 1000000.0, (unsigned long long)requests * 1000000ULL / (end-start)); + + fprintf(stderr, " > DB : persons %llu, machines %llu, unique URLs %llu, accesses %llu, URLs: for persons %llu, for machines %llu\n", + registry.persons_count, registry.machines_count, registry.urls_count, registry.usages_count, + registry.persons_urls_count, registry.machines_urls_count); + } + + (void) argc; + (void) argv; + + uint32_t u, users = 1000000; + uint32_t m, machines = 200000; + uint32_t machines2 = machines * 2; + + char **users_guids = malloc(users * sizeof(char *)); + char **machines_guids = malloc(machines2 * sizeof(char *)); + char **machines_urls = malloc(machines2 * 
sizeof(char *)); + unsigned long long start; + + registry_init(); + + fprintf(stderr, "Generating %u machine guids\n", machines2); + for(m = 0; m < machines2 ;m++) { + uuid_t uuid; + machines_guids[m] = malloc(36+1); + uuid_generate(uuid); + uuid_unparse(uuid, machines_guids[m]); + + char buf[FILENAME_MAX + 1]; + snprintfz(buf, FILENAME_MAX, "http://%u.netdata.rocks/", m+1); + machines_urls[m] = strdup(buf); + + // fprintf(stderr, "\tmachine %u: '%s', url: '%s'\n", m + 1, machines_guids[m], machines_urls[m]); + } + + start = timems(); + fprintf(stderr, "\nGenerating %u users accessing %u machines\n", users, machines); + m = 0; + time_t now = time(NULL); + for(u = 0; u < users ; u++) { + if(++m == machines) m = 0; + + PERSON *p = registry_request_access(NULL, machines_guids[m], machines_urls[m], "test", now); + users_guids[u] = p->guid; + } + print_stats(u, start, timems()); + + start = timems(); + fprintf(stderr, "\nAll %u users accessing again the same %u servers\n", users, machines); + m = 0; + now = time(NULL); + for(u = 0; u < users ; u++) { + if(++m == machines) m = 0; + + PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now); + + if(p->guid != users_guids[u]) + fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'\n", users_guids[u], p->guid); + } + print_stats(u, start, timems()); + + start = timems(); + fprintf(stderr, "\nAll %u users accessing a new server, out of the %u servers\n", users, machines); + m = 1; + now = time(NULL); + for(u = 0; u < users ; u++) { + if(++m == machines) m = 0; + + PERSON *p = registry_request_access(users_guids[u], machines_guids[m], machines_urls[m], "test", now); + + if(p->guid != users_guids[u]) + fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'\n", users_guids[u], p->guid); + } + print_stats(u, start, timems()); + + start = timems(); + fprintf(stderr, "\n%u random users accessing a random server, out of the %u servers\n", users, machines); + now = time(NULL); + for(u = 0; u < users ; u++) { + uint32_t tu = random() * users / RAND_MAX; + uint32_t tm = random() * machines / RAND_MAX; + + PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now); + + if(p->guid != users_guids[tu]) + fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'\n", users_guids[tu], p->guid); + } + print_stats(u, start, timems()); + + start = timems(); + fprintf(stderr, "\n%u random users accessing a random server, out of %u servers\n", users, machines2); + now = time(NULL); + for(u = 0; u < users ; u++) { + uint32_t tu = random() * users / RAND_MAX; + uint32_t tm = random() * machines2 / RAND_MAX; + + PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], machines_urls[tm], "test", now); + + if(p->guid != users_guids[tu]) + fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'\n", users_guids[tu], p->guid); + } + print_stats(u, start, timems()); + + for(m = 0; m < 10; m++) { + start = timems(); + fprintf(stderr, + "\n%u random user accesses to a random server, out of %u servers,\n > using 1/10000 with a random url, 1/1000 with a mismatched url\n", + users * 2, machines2); + now = time(NULL); + for (u = 0; u < users * 2; u++) { + uint32_t tu = random() * users / RAND_MAX; + uint32_t tm = random() * machines2 / RAND_MAX; + + char *url = machines_urls[tm]; + char buf[FILENAME_MAX + 1]; + if (random() % 10000 == 1234) { + snprintfz(buf, FILENAME_MAX, "http://random.%ld.netdata.rocks/", random()); + url = buf; + } + 
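// the branch below replays the access with a URL that belongs to another + // machine, exercising the registry's URL-mismatch path + 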
else if (random() % 1000 == 123) + url = machines_urls[random() * machines2 / RAND_MAX]; + + PERSON *p = registry_request_access(users_guids[tu], machines_guids[tm], url, "test", now); + + if (p->guid != users_guids[tu]) + fprintf(stderr, "ERROR: expected to get user guid '%s' but got '%s'\n", users_guids[tu], p->guid); + } + print_stats(u, start, timems()); + } + + fprintf(stderr, "\n\nSAVE\n"); + start = timems(); + registry_save(); + print_stats(registry.persons_count, start, timems()); + + fprintf(stderr, "\n\nCLEANUP\n"); + start = timems(); + registry_free(); + print_stats(registry.persons_count, start, timems()); + return 0; +} + +// ---------------------------------------------------------------------------- +// TESTING + +int main(int argc, char **argv) { + config_set_boolean("registry", "enabled", 1); + + //debug_flags = 0xFFFFFFFF; + test1(argc, argv); + exit(0); + + (void)argc; + (void)argv; + + + PERSON *p1, *p2; + + fprintf(stderr, "\n\nINITIALIZATION\n"); + + registry_init(); + + int i = 2; + + fprintf(stderr, "\n\nADDING ENTRY\n"); + p1 = registry_request_access("2c95abd0-1542-11e6-8c66-00508db7e9c9", "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL)); + + if(0) + while(i--) { +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ENTRY\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p1 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://localhost:19999/", "test", time(NULL)); + +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ANOTHER URL\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://127.0.0.1:19999/", "test", time(NULL)); + +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ANOTHER URL\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL)); + +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p1 = registry_request_access(p1->guid, "7c173980-145c-11e6-b86f-00508db7e9c1", "http://my.server:19999/", "test", time(NULL)); + +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ANOTHER PERSON\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p2 = registry_request_access(NULL, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL)); + +#ifdef REGISTRY_STDOUT_DUMP + fprintf(stderr, "\n\nADDING ANOTHER MACHINE\n"); +#endif /* REGISTRY_STDOUT_DUMP */ + p2 = registry_request_access(p2->guid, "7c173980-145c-11e6-b86f-00508db7e9c3", "http://localhost:19999/", "test", time(NULL)); + } + + fprintf(stderr, "\n\nSAVE\n"); + registry_save(); + + fprintf(stderr, "\n\nCLEANUP\n"); + registry_free(); + return 0; +} diff --git a/tests/profile/benchmark-value-pairs.c b/tests/profile/benchmark-value-pairs.c new file mode 100644 index 000000000..ae4f53c3a --- /dev/null +++ b/tests/profile/benchmark-value-pairs.c @@ -0,0 +1,623 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ + +#include "config.h" +#include "libnetdata/libnetdata.h" + +#ifdef simple_hash +#undef simple_hash +#endif + +void netdata_cleanup_and_exit(int ret) { + exit(ret); +} + +#define simple_hash(name) ({ \ + register unsigned char *__hash_source = (unsigned char *)(name); \ + register uint32_t __hash_value = 0x811c9dc5; \ + while (*__hash_source) { \ + __hash_value *= 16777619; \ + __hash_value ^= (uint32_t) *__hash_source++; \ + } \ + __hash_value; \ +}) + +static 
inline uint32_t simple_hash2(const char *name) { + register unsigned char *s = (unsigned char *)name; + register uint32_t hval = 0x811c9dc5; + while (*s) { + hval *= 16777619; + hval ^= (uint32_t) *s++; + } + return hval; +} + +static inline unsigned long long fast_strtoull(const char *s) { + register unsigned long long n = 0; + register char c; + for(c = *s; c >= '0' && c <= '9' ; c = *(++s)) { + n *= 10; + n += c - '0'; + // n = (n << 1) + (n << 3) + (c - '0'); + } + return n; +} + +static uint32_t cache_hash = 0; +static uint32_t rss_hash = 0; +static uint32_t rss_huge_hash = 0; +static uint32_t mapped_file_hash = 0; +static uint32_t writeback_hash = 0; +static uint32_t dirty_hash = 0; +static uint32_t swap_hash = 0; +static uint32_t pgpgin_hash = 0; +static uint32_t pgpgout_hash = 0; +static uint32_t pgfault_hash = 0; +static uint32_t pgmajfault_hash = 0; +static uint32_t inactive_anon_hash = 0; +static uint32_t active_anon_hash = 0; +static uint32_t inactive_file_hash = 0; +static uint32_t active_file_hash = 0; +static uint32_t unevictable_hash = 0; +static uint32_t hierarchical_memory_limit_hash = 0; +static uint32_t total_cache_hash = 0; +static uint32_t total_rss_hash = 0; +static uint32_t total_rss_huge_hash = 0; +static uint32_t total_mapped_file_hash = 0; +static uint32_t total_writeback_hash = 0; +static uint32_t total_dirty_hash = 0; +static uint32_t total_swap_hash = 0; +static uint32_t total_pgpgin_hash = 0; +static uint32_t total_pgpgout_hash = 0; +static uint32_t total_pgfault_hash = 0; +static uint32_t total_pgmajfault_hash = 0; +static uint32_t total_inactive_anon_hash = 0; +static uint32_t total_active_anon_hash = 0; +static uint32_t total_inactive_file_hash = 0; +static uint32_t total_active_file_hash = 0; +static uint32_t total_unevictable_hash = 0; + +unsigned long long values1[50] = { 0 }; +unsigned long long values2[50] = { 0 }; +unsigned long long values3[50] = { 0 }; +unsigned long long values4[50] = { 0 }; +unsigned long long values5[50] = { 0 }; +unsigned long long values6[50] = { 0 }; +unsigned long long values7[50] = { 0 }; +unsigned long long values8[50] = { 0 }; +unsigned long long values9[50] = { 0 }; + +struct pair { + const char *name; + const char *value; + uint32_t hash; + unsigned long long *collected8; + unsigned long long *collected9; +} pairs[] = { + { "cache", "12345678901234", 0, &values8[0] ,&values9[0] }, + { "rss", "23456789012345", 0, &values8[1] ,&values9[1] }, + { "rss_huge", "34567890123456", 0, &values8[2] ,&values9[2] }, + { "mapped_file", "45678901234567", 0, &values8[3] ,&values9[3] }, + { "writeback", "56789012345678", 0, &values8[4] ,&values9[4] }, + { "dirty", "67890123456789", 0, &values8[5] ,&values9[5] }, + { "swap", "78901234567890", 0, &values8[6] ,&values9[6] }, + { "pgpgin", "89012345678901", 0, &values8[7] ,&values9[7] }, + { "pgpgout", "90123456789012", 0, &values8[8] ,&values9[8] }, + { "pgfault", "10345678901234", 0, &values8[9] ,&values9[9] }, + { "pgmajfault", "11456789012345", 0, &values8[10] ,&values9[10] }, + { "inactive_anon", "12000000000000", 0, &values8[11] ,&values9[11] }, + { "active_anon", "13345678901234", 0, &values8[12] ,&values9[12] }, + { "inactive_file", "14345678901234", 0, &values8[13] ,&values9[13] }, + { "active_file", "15345678901234", 0, &values8[14] ,&values9[14] }, + { "unevictable", "16345678901234", 0, &values8[15] ,&values9[15] }, + { "hierarchical_memory_limit", "17345678901234", 0, &values8[16] ,&values9[16] }, + { "total_cache", "18345678901234", 0, &values8[17] ,&values9[17] }, + { 
"total_rss", "19345678901234", 0, &values8[18] ,&values9[18] }, + { "total_rss_huge", "20345678901234", 0, &values8[19] ,&values9[19] }, + { "total_mapped_file", "21345678901234", 0, &values8[20] ,&values9[20] }, + { "total_writeback", "22345678901234", 0, &values8[21] ,&values9[21] }, + { "total_dirty", "23000000000000", 0, &values8[22] ,&values9[22] }, + { "total_swap", "24345678901234", 0, &values8[23] ,&values9[23] }, + { "total_pgpgin", "25345678901234", 0, &values8[24] ,&values9[24] }, + { "total_pgpgout", "26345678901234", 0, &values8[25] ,&values9[25] }, + { "total_pgfault", "27345678901234", 0, &values8[26] ,&values9[26] }, + { "total_pgmajfault", "28345678901234", 0, &values8[27] ,&values9[27] }, + { "total_inactive_anon", "29345678901234", 0, &values8[28] ,&values9[28] }, + { "total_active_anon", "30345678901234", 0, &values8[29] ,&values9[29] }, + { "total_inactive_file", "31345678901234", 0, &values8[30] ,&values9[30] }, + { "total_active_file", "32345678901234", 0, &values8[31] ,&values9[31] }, + { "total_unevictable", "33345678901234", 0, &values8[32] ,&values9[32] }, + { NULL, NULL , 0, NULL ,NULL } +}; + +// simple system strcmp() +void test1() { + int i; + for(i = 0; pairs[i].name ; i++) { + const char *s = pairs[i].name; + const char *v = pairs[i].value; + + if(unlikely(!strcmp(s, "cache"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "rss"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "rss_huge"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "mapped_file"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "writeback"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "dirty"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "swap"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "pgpgin"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "pgpgout"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "pgfault"))) + values1[i] = strtoull(v, NULL, 10); + + else if(unlikely(!strcmp(s, "pgmajfault"))) + values1[i] = strtoull(v, NULL, 10); + } +} + +// inline simple_hash() with system strtoull() +void test2() { + int i; + for(i = 0; pairs[i].name ; i++) { + const char *s = pairs[i].name; + const char *v = pairs[i].value; + + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values2[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + 
values2[i] = strtoull(v, NULL, 10); + } +} + +// statement expression simple_hash(), system strtoull() +void test3() { + int i; + for(i = 0; pairs[i].name ; i++) { + const char *s = pairs[i].name; + const char *v = pairs[i].value; + + uint32_t hash = simple_hash(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values3[i] = strtoull(v, NULL, 10); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + values3[i] = strtoull(v, NULL, 10); + } +} + + +// inline simple_hash(), if-continue checks +void test4() { + int i; + for(i = 0; pairs[i].name ; i++) { + const char *s = pairs[i].name; + const char *v = pairs[i].value; + + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == mapped_file_hash && !strcmp(s, "mapped_file"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + + if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) { + values4[i] = strtoull(v, NULL, 0); + continue; + } + } +} + +// inline simple_hash(), if-else-if-else-if (netdata default) +void test5() { + int i; + for(i = 0; pairs[i].name ; i++) { + const char *s = pairs[i].name; + const char *v = pairs[i].value; + + uint32_t hash = simple_hash2(s); + + if(unlikely(hash == cache_hash && !strcmp(s, "cache"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == rss_hash && !strcmp(s, "rss"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == rss_huge_hash && !strcmp(s, "rss_huge"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == mapped_file_hash && !strcmp(s, 
"mapped_file"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == writeback_hash && !strcmp(s, "writeback"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == dirty_hash && !strcmp(s, "dirty"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == swap_hash && !strcmp(s, "swap"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == pgpgin_hash && !strcmp(s, "pgpgin"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == pgpgout_hash && !strcmp(s, "pgpgout"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == pgfault_hash && !strcmp(s, "pgfault"))) + values5[i] = fast_strtoull(v); + + else if(unlikely(hash == pgmajfault_hash && !strcmp(s, "pgmajfault"))) + values5[i] = fast_strtoull(v); + } +} + +// ---------------------------------------------------------------------------- + +void arl_strtoull(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; + (void)hash; + + register unsigned long long *d = dst; + *d = strtoull(value, NULL, 10); + // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d); +} + +void test6() { + static ARL_BASE *base = NULL; + + if(unlikely(!base)) { + base = arl_create("test6", arl_strtoull, 60); + arl_expect_custom(base, "cache", NULL, &values6[0]); + arl_expect_custom(base, "rss", NULL, &values6[1]); + arl_expect_custom(base, "rss_huge", NULL, &values6[2]); + arl_expect_custom(base, "mapped_file", NULL, &values6[3]); + arl_expect_custom(base, "writeback", NULL, &values6[4]); + arl_expect_custom(base, "dirty", NULL, &values6[5]); + arl_expect_custom(base, "swap", NULL, &values6[6]); + arl_expect_custom(base, "pgpgin", NULL, &values6[7]); + arl_expect_custom(base, "pgpgout", NULL, &values6[8]); + arl_expect_custom(base, "pgfault", NULL, &values6[9]); + arl_expect_custom(base, "pgmajfault", NULL, &values6[10]); + } + + arl_begin(base); + + int i; + for(i = 0; pairs[i].name ; i++) + if(arl_check(base, pairs[i].name, pairs[i].value)) break; +} + +void arl_str2ull(const char *name, uint32_t hash, const char *value, void *dst) { + (void)name; + (void)hash; + + register unsigned long long *d = dst; + *d = str2ull(value); + // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d); +} + +void test7() { + static ARL_BASE *base = NULL; + + if(unlikely(!base)) { + base = arl_create("test7", arl_str2ull, 60); + arl_expect_custom(base, "cache", NULL, &values7[0]); + arl_expect_custom(base, "rss", NULL, &values7[1]); + arl_expect_custom(base, "rss_huge", NULL, &values7[2]); + arl_expect_custom(base, "mapped_file", NULL, &values7[3]); + arl_expect_custom(base, "writeback", NULL, &values7[4]); + arl_expect_custom(base, "dirty", NULL, &values7[5]); + arl_expect_custom(base, "swap", NULL, &values7[6]); + arl_expect_custom(base, "pgpgin", NULL, &values7[7]); + arl_expect_custom(base, "pgpgout", NULL, &values7[8]); + arl_expect_custom(base, "pgfault", NULL, &values7[9]); + arl_expect_custom(base, "pgmajfault", NULL, &values7[10]); + } + + arl_begin(base); + + int i; + for(i = 0; pairs[i].name ; i++) + if(arl_check(base, pairs[i].name, pairs[i].value)) break; +} + +void test8() { + int i; + for(i = 0; pairs[i].name; i++) { + uint32_t hash = simple_hash(pairs[i].name); + + int j; + for(j = 0; pairs[j].name; j++) { + if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) { + *pairs[j].collected8 = strtoull(pairs[i].value, NULL, 10); + break; + } + } + } +} + +void test9() { + int i; + for(i = 0; pairs[i].name; i++) { 
+ uint32_t hash = simple_hash(pairs[i].name); + + int j; + for(j = 0; pairs[j].name; j++) { + if(hash == pairs[j].hash && !strcmp(pairs[i].name, pairs[j].name)) { + *pairs[j].collected9 = str2ull(pairs[i].value); + break; + } + } + } +} + +// ---------------------------------------------------------------------------- + +/* +// ============== +// --- Poor man cycle counting. +static unsigned long tsc; + +static void begin_tsc(void) +{ + unsigned long a, d; + asm volatile ("cpuid\nrdtsc" : "=a" (a), "=d" (d) : "0" (0) : "ebx", "ecx"); + tsc = ((unsigned long)d << 32) | (unsigned long)a; +} + +static unsigned long end_tsc(void) +{ + unsigned long a, d; + asm volatile ("rdtscp" : "=a" (a), "=d" (d) : : "ecx"); + return (((unsigned long)d << 32) | (unsigned long)a) - tsc; +} +// =============== +*/ + +static unsigned long long clk; + +static void begin_clock() { + struct timeval tv; + if(unlikely(gettimeofday(&tv, NULL) == -1)) + return; + clk = tv.tv_sec * 1000000 + tv.tv_usec; +} + +static unsigned long long end_clock() { + struct timeval tv; + if(unlikely(gettimeofday(&tv, NULL) == -1)) + return -1; + return clk = tv.tv_sec * 1000000 + tv.tv_usec - clk; +} + +int main(void) +{ + { + int i; + for(i = 0; pairs[i].name; i++) + pairs[i].hash = simple_hash(pairs[i].name); + } + + cache_hash = simple_hash("cache"); + rss_hash = simple_hash("rss"); + rss_huge_hash = simple_hash("rss_huge"); + mapped_file_hash = simple_hash("mapped_file"); + writeback_hash = simple_hash("writeback"); + dirty_hash = simple_hash("dirty"); + swap_hash = simple_hash("swap"); + pgpgin_hash = simple_hash("pgpgin"); + pgpgout_hash = simple_hash("pgpgout"); + pgfault_hash = simple_hash("pgfault"); + pgmajfault_hash = simple_hash("pgmajfault"); + inactive_anon_hash = simple_hash("inactive_anon"); + active_anon_hash = simple_hash("active_anon"); + inactive_file_hash = simple_hash("inactive_file"); + active_file_hash = simple_hash("active_file"); + unevictable_hash = simple_hash("unevictable"); + hierarchical_memory_limit_hash = simple_hash("hierarchical_memory_limit"); + total_cache_hash = simple_hash("total_cache"); + total_rss_hash = simple_hash("total_rss"); + total_rss_huge_hash = simple_hash("total_rss_huge"); + total_mapped_file_hash = simple_hash("total_mapped_file"); + total_writeback_hash = simple_hash("total_writeback"); + total_dirty_hash = simple_hash("total_dirty"); + total_swap_hash = simple_hash("total_swap"); + total_pgpgin_hash = simple_hash("total_pgpgin"); + total_pgpgout_hash = simple_hash("total_pgpgout"); + total_pgfault_hash = simple_hash("total_pgfault"); + total_pgmajfault_hash = simple_hash("total_pgmajfault"); + total_inactive_anon_hash = simple_hash("total_inactive_anon"); + total_active_anon_hash = simple_hash("total_active_anon"); + total_inactive_file_hash = simple_hash("total_inactive_file"); + total_active_file_hash = simple_hash("total_active_file"); + total_unevictable_hash = simple_hash("total_unevictable"); + + // cache functions + (void)simple_hash2("hello world"); + (void)strcmp("1", "2"); + (void)strtoull("123", NULL, 0); + + unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0, c8 = 0, c9 = 0; + unsigned long max = 1000000; + + begin_clock(); + for(i = 0; i <= max ;i++) test1(); + c1 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test2(); + c2 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test3(); + c3 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test4(); + c4 = end_clock(); + + begin_clock(); + for(i = 0; i <= max 
;i++) test5(); + c5 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test6(); + c6 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test7(); + c7 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test8(); + c8 = end_clock(); + + begin_clock(); + for(i = 0; i <= max ;i++) test9(); + c9 = end_clock(); + + for(i = 0; i < 11 ; i++) + printf("value %lu: %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i], values7[i], values8[i], values9[i]); + + printf("\n\nRESULTS\n"); + printf("test1() [1] in %lu usecs: simple system strcmp().\n" + "test2() [4] in %lu usecs: inline simple_hash() with system strtoull().\n" + "test3() [5] in %lu usecs: statement expression simple_hash(), system strtoull().\n" + "test4() [6] in %lu usecs: inline simple_hash(), if-continue checks.\n" + "test5() [7] in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default prior to ARL).\n" + "test6() [8] in %lu usecs: adaptive re-sortable array with strtoull() (wow!)\n" + "test7() [9] in %lu usecs: adaptive re-sortable array with str2ull() (wow!)\n" + "test8() [2] in %lu usecs: nested loop with strtoull()\n" + "test9() [3] in %lu usecs: nested loop with str2ull()\n" + , c1 + , c2 + , c3 + , c4 + , c5 + , c6 + , c7 + , c8 + , c9 + ); + + return 0; +} diff --git a/tests/profile/statsd-stress.c b/tests/profile/statsd-stress.c new file mode 100644 index 000000000..435d58d5c --- /dev/null +++ b/tests/profile/statsd-stress.c @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <string.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <pthread.h> +#include <time.h> + +void diep(char *s) +{ + perror(s); + exit(1); +} + +size_t run_threads = 1; +size_t metrics = 1024; + +#define SERVER_IP "127.0.0.1" +#define PORT 8125 + +size_t myrand(size_t max) { + size_t loops = max / RAND_MAX; + size_t i; + + size_t ret = rand(); + for(i = 0; i < loops ;i++) + ret += rand(); + + return ret % max; +} + +struct thread_data { + size_t id; + struct sockaddr_in *si_other; + int slen; + size_t counter; +}; + +static void *report_thread(void *__data) { + struct thread_data *data = (struct thread_data *)__data; + + size_t last = 0; + for (;;) { + size_t i; + size_t total = 0; + for(i = 0; i < run_threads ;i++) + total += data[i].counter; + + printf("%zu metrics/s\n", total-last); + last = total; + + sleep(1); + printf("\033[F\033[J"); + } + + return NULL; +} + +char *types[] = {"g", "c", "m", "ms", "h", "s", NULL}; +// char *types[] = {"g", "c", "C", "h", "ms", NULL}; // brubeck compatible + +static void *spam_thread(void *__data) { + struct thread_data *data = (struct thread_data *)__data; + + int s; + char packet[1024]; + + if ((s = socket(AF_INET, SOCK_DGRAM, 0))==-1) + diep("socket"); + + char **packets = malloc(sizeof(char *) * metrics); + size_t i, *lengths = malloc(sizeof(size_t) * metrics); + size_t t; + + for(i = 0, t = 0; i < metrics ;i++, t++) { + if(!types[t]) t = 0; + char *type = types[t]; + + lengths[i] = sprintf(packet, "stress.%s.t%zu.m%zu:%zu|%s", type, data->id, i, myrand(metrics), type); + packets[i] = strdup(packet); + // printf("packet %zu, of length %zu: '%s'\n", i, lengths[i], packets[i]); + } + //printf("\n"); + + for (;;) { + for(i = 0; i < metrics ;i++) { + if (sendto(s, packets[i], lengths[i], 0, (void *)data->si_other, data->slen) < 0) { + printf("C ==> DROPPED\n"); + return NULL; + } + data->counter++; + } + } + + free(packets); + free(lengths); + close(s); + return NULL; +} + +int 
main(int argc, char *argv[]) +{ + if (argc != 5) { + fprintf(stderr, "Usage: '%s THREADS METRICS IP PORT'\n", argv[0]); + exit(-1); + } + + run_threads = atoi(argv[1]); + metrics = atoi(argv[2]); + char *ip = argv[3]; + int port = atoi(argv[4]); + + struct thread_data data[run_threads]; + struct sockaddr_in si_other; + pthread_t threads[run_threads], report; + size_t i; + + srand(time(NULL)); + + memset(&si_other, 0, sizeof(si_other)); + si_other.sin_family = AF_INET; + si_other.sin_port = htons(port); + if (inet_aton(ip, &si_other.sin_addr)==0) { + fprintf(stderr, "inet_aton() of ip '%s' failed\n", ip); + exit(1); + } + + for (i = 0; i < run_threads; ++i) { + data[i].id = i; + data[i].si_other = &si_other; + data[i].slen = sizeof(si_other); + data[i].counter = 0; + pthread_create(&threads[i], NULL, spam_thread, &data[i]); + } + + printf("\n"); + printf("THREADS : %zu\n", run_threads); + printf("METRICS : %zu\n", metrics); + printf("DESTINATION : %s:%d\n", ip, port); + printf("\n"); + pthread_create(&report, NULL, report_thread, &data); + + for (i =0; i < run_threads; ++i) + pthread_join(threads[i], NULL); + + return 0; +} diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c new file mode 100644 index 000000000..144381cf0 --- /dev/null +++ b/tests/profile/test-eval.c @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ + +/* + * 1. build netdata (as normally) + * 2. cd profile/ + * 3. compile with: + * gcc -O1 -ggdb -Wall -Wextra -I ../src/ -I ../ -o test-eval test-eval.c ../src/log.o ../src/eval.o ../src/common.o ../src/clocks.o ../src/web_buffer.o ../src/storage_number.o -pthread -lm + */ + +#include "config.h" +#include "libnetdata/libnetdata.h" +#include "database/rrdcalc.h" + +void netdata_cleanup_and_exit(int ret) { exit(ret); } + +/* +void indent(int level, int show) { + int i = level; + while(i--) printf(" | "); + if(show) printf(" \\_ "); + else printf(" \\_ "); +} + +void print_node(EVAL_NODE *op, int level); + +void print_value(EVAL_VALUE *v, int level) { + indent(level, 0); + + switch(v->type) { + case EVAL_VALUE_INVALID: + printf("value (NOP)\n"); + break; + + case EVAL_VALUE_NUMBER: + printf("value %Lf (NUMBER)\n", v->number); + break; + + case EVAL_VALUE_EXPRESSION: + printf("value (SUB-EXPRESSION)\n"); + print_node(v->expression, level+1); + break; + + default: + printf("value (INVALID type %d)\n", v->type); + break; + + } +} + +void print_node(EVAL_NODE *op, int level) { + +// if(op->operator != EVAL_OPERATOR_NOP) { + indent(level, 1); + if(op->operator) printf("%c (node %d, precedence: %d)\n", op->operator, op->id, op->precedence); + else printf("NOP (node %d, precedence: %d)\n", op->id, op->precedence); +// } + + int i = op->count; + while(i--) print_value(&op->ops[i], level + 1); +} + +calculated_number evaluate(EVAL_NODE *op, int depth); + +calculated_number evaluate_value(EVAL_VALUE *v, int depth) { + switch(v->type) { + case EVAL_VALUE_NUMBER: + return v->number; + + case EVAL_VALUE_EXPRESSION: + return evaluate(v->expression, depth); + + default: + fatal("I don't know how to handle EVAL_VALUE type %d", v->type); + } +} + +void print_depth(int depth) { + static int count = 0; + + printf("%d. 
", ++count); + while(depth--) printf(" "); +} + +calculated_number evaluate(EVAL_NODE *op, int depth) { + calculated_number n1, n2, r; + + switch(op->operator) { + case EVAL_OPERATOR_SIGN_PLUS: + r = evaluate_value(&op->ops[0], depth); + break; + + case EVAL_OPERATOR_SIGN_MINUS: + r = -evaluate_value(&op->ops[0], depth); + break; + + case EVAL_OPERATOR_PLUS: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 + n2; + print_depth(depth); + printf("%Lf = %Lf + %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_MINUS: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 - n2; + print_depth(depth); + printf("%Lf = %Lf - %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_MULTIPLY: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 * n2; + print_depth(depth); + printf("%Lf = %Lf * %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_DIVIDE: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 / n2; + print_depth(depth); + printf("%Lf = %Lf / %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_NOT: + n1 = evaluate_value(&op->ops[0], depth); + r = !n1; + print_depth(depth); + printf("%Lf = NOT %Lf\n", r, n1); + break; + + case EVAL_OPERATOR_AND: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 && n2; + print_depth(depth); + printf("%Lf = %Lf AND %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_OR: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 || n2; + print_depth(depth); + printf("%Lf = %Lf OR %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_GREATER_THAN_OR_EQUAL: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 >= n2; + print_depth(depth); + printf("%Lf = %Lf >= %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_LESS_THAN_OR_EQUAL: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 <= n2; + print_depth(depth); + printf("%Lf = %Lf <= %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_GREATER: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 > n2; + print_depth(depth); + printf("%Lf = %Lf > %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_LESS: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 < n2; + 
print_depth(depth); + printf("%Lf = %Lf < %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_NOT_EQUAL: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 != n2; + print_depth(depth); + printf("%Lf = %Lf <> %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_EQUAL: + if(op->count != 2) + fatal("Operator '%c' requires 2 values, but we have %d", op->operator, op->count); + n1 = evaluate_value(&op->ops[0], depth); + n2 = evaluate_value(&op->ops[1], depth); + r = n1 == n2; + print_depth(depth); + printf("%Lf = %Lf == %Lf\n", r, n1, n2); + break; + + case EVAL_OPERATOR_EXPRESSION_OPEN: + printf("BEGIN SUB-EXPRESSION\n"); + r = evaluate_value(&op->ops[0], depth + 1); + printf("END SUB-EXPRESSION\n"); + break; + + case EVAL_OPERATOR_NOP: + case EVAL_OPERATOR_VALUE: + r = evaluate_value(&op->ops[0], depth); + break; + + default: + error("I don't know how to handle operator '%c'", op->operator); + r = 0; + break; + } + + return r; +} + + +void print_expression(EVAL_NODE *op, const char *failed_at, int error) { + if(op) { + printf("expression tree:\n"); + print_node(op, 0); + + printf("\nevaluation steps:\n"); + evaluate(op, 0); + + int error; + calculated_number ret = expression_evaluate(op, &error); + printf("\ninternal evaluator:\nSTATUS: %d, RESULT = %Lf\n", error, ret); + + expression_free(op); + } + else { + printf("error: %d, failed_at: '%s'\n", error, (failed_at)?failed_at:""); + } +} +*/ + +int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result) { + (void)variable; + (void)hash; + (void)rc; + (void)result; + + return 0; +} + +int main(int argc, char **argv) { + if(argc != 2) { + fprintf(stderr, "I need an expression (enclose it in single-quotes (') as a single parameter)\n"); + exit(1); + } + + const char *failed_at = NULL; + int error; + + EVAL_EXPRESSION *exp = expression_parse(argv[1], &failed_at, &error); + if(!exp) + printf("\nPARSING FAILED\nExpression: '%s'\nParsing stopped at: '%s'\nParsing error code: %d (%s)\n", argv[1], (failed_at)?((*failed_at)?failed_at:""):"", error, expression_strerror(error)); + + else { + printf("\nPARSING OK\nExpression: '%s'\nParsed as : '%s'\nParsing error code: %d (%s)\n", argv[1], exp->parsed_as, error, expression_strerror(error)); + + if(expression_evaluate(exp)) { + printf("\nEvaluates to: %Lf\n\n", exp->result); + } + else { + printf("\nEvaluation failed with code %d and message: %s\n\n", exp->error, buffer_tostring(exp->error_msg)); + } + expression_free(exp); + } + + return 0; +} diff --git a/web/Makefile.in b/web/Makefile.in deleted file mode 100644 index 661f819c5..000000000 --- a/web/Makefile.in +++ /dev/null @@ -1,652 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - 
installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = 
@OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - api \ - gui \ - server \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - gui/confluence/README.md \ - gui/custom/README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/Makefile -.PRECIOUS: 
Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/README.md b/web/README.md index 8e59ca5fd..c110ef651 100644 --- a/web/README.md +++ b/web/README.md @@ -1,22 +1,22 @@ -# Web Dashboards Overview +# Web dashboards overview The default port is 19999; for example, to access the dashboard on localhost, use: http://localhost:19999 -To view netdata collected data you access its **[REST API v1](api/)**. +To view Netdata collected data you access its **[REST API v1](api/)**. -For our convenience, netdata provides 2 more layers: +For our convenience, Netdata provides 2 more layers: 1. The `dashboard.js` javascript library that allows us to design custom dashboards using plain HTML. For information on creating custom dashboards, see **[Custom Dashboards](gui/custom/)** and **[Atlassian Confluence Dashboards](gui/confluence/)** -2. Ready to be used web dashboards that render all the charts a netdata server maintains. +2. Ready to be used web dashboards that render all the charts a Netdata server maintains. -## customizing the standard dashboards +## Customizing the standard dashboards Charts information is stored at /usr/share/netdata/web/[dashboard_info.js](gui/dashboard_info.js). This file includes information that is rendered on the dashboard, controls chart colors, section and subsection heading, titles, etc. -If you change that file, your changes will be overwritten when netdata is updated. You can preserve your settings by creating a new such file (there is /usr/share/netdata/web/[dashboard_info_custom.example.js](gui/dashboard_info_custom_example.js) you can use to start with). +If you change that file, your changes will be overwritten when Netdata is updated. 
You can preserve your settings by creating a new such file (there is /usr/share/netdata/web/[dashboard_info_custom.example.js](gui/dashboard_info_custom_example.js) you can use to start with). -You have to copy the example file under a new name, so that it will not be overwritten with netdata updates. +You have to copy the example file under a new name, so that it will not be overwritten with Netdata updates. To configure your info file set in netdata.conf: @@ -24,3 +24,5 @@ To configure your info file set in netdata.conf: [web] custom dashboard_info.js = your_file_name.js ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/Makefile.am b/web/api/Makefile.am index 5755612c8..0f5481759 100644 --- a/web/api/Makefile.am +++ b/web/api/Makefile.am @@ -8,6 +8,7 @@ SUBDIRS = \ queries \ exporters \ formatters \ + health \ $(NULL) dist_noinst_DATA = \ diff --git a/web/api/Makefile.in b/web/api/Makefile.in deleted file mode 100644 index 7bcd57e1a..000000000 --- a/web/api/Makefile.in +++ /dev/null @@ -1,709 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) $(dist_web_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 
2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(webdir)" -DATA = $(dist_noinst_DATA) $(dist_web_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. 
-am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - badges \ - queries \ - exporters \ - formatters \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -dist_web_DATA = \ - netdata-swagger.yaml \ - netdata-swagger.json \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_webDATA: $(dist_web_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ - done - -uninstall-dist_webDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(webdir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: install-dist_webDATA - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-dist_webDATA - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dist_webDATA install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - installdirs-am maintainer-clean maintainer-clean-generic \ - mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \ - tags-am uninstall uninstall-am uninstall-dist_webDATA - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/README.md b/web/api/README.md index 813998016..44afbc90d 100644 --- a/web/api/README.md +++ b/web/api/README.md @@ -17,3 +17,5 @@ Check this [single chart, jsfiddle example](https://jsfiddle.net/ktsaou/ensu4uws and this [multi chart, jsfiddle example](https://jsfiddle.net/ktsaou/L5y2eqp2/): ![image](https://cloud.githubusercontent.com/assets/2662304/23824766/31a4a68c-0685-11e7-8429-8327cab64be2.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/badges/Makefile.in b/web/api/badges/Makefile.in deleted file mode 100644 index 79a3a17a2..000000000 --- a/web/api/badges/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
[... remainder of the deleted, automake-generated web/api/badges/Makefile.in elided ...]
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
index 11d04d064..6884cc11a 100644
--- a/web/api/badges/README.md
+++ b/web/api/badges/README.md
@@ -261,24 +261,6 @@ character|name|escape sequence
 ` \ `|backslash (when you need a `/`)|`%5C`
 ` \| `|pipe (delimiting parameters)|`%7C`
 
----
-
-## Using the path instead of the query string
-
-The badges can also be generated using the URL path for passing parameters. The format is exactly the same.
-
-So instead of:
-
- `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`
-
-you can write:
-
- `http://your.netdata:19999/api/v1/badge.svg/option1/option2/option3/...`
-
-You can also append anything else you like, like this:
-
- `http://your.netdata:19999/api/v1/badge.svg/option1/option2/option3/my-super-badge.svg`
-
 ## FAQ
 
 #### Is it fast?
@@ -322,3 +304,5 @@ You can refresh them from your browser console though. Press F12 to open the web
 ```js
 var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); };
 ```
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fbadges%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
index d0600359e..b24fddedf 100644
--- a/web/api/badges/web_buffer_svg.c
+++ b/web/api/badges/web_buffer_svg.c
@@ -913,7 +913,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
     uint32_t options = 0x00000000;
 
     while(url) {
-        char *value = mystrsep(&url, "/?&");
+        char *value = mystrsep(&url, "&");
         if(!value || !*value) continue;
 
         char *name = mystrsep(&value, "=");
diff --git a/web/api/exporters/Makefile.in b/web/api/exporters/Makefile.in
deleted file mode 100644
index 50e4a6cda..000000000
--- a/web/api/exporters/Makefile.in
+++ /dev/null
@@ -1,649 +0,0 @@
[... 649 lines of automake-generated Makefile.in boilerplate elided ...]
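Both `mystrsep()` hunks in this patch (the one in `web_buffer_svg.c` above, and its twin in `allmetrics.c` below) shrink the separator set to `"&"`, which lines up with the README section removed above: `/` and `?` no longer split the badge parameter string. `mystrsep()` is a netdata-internal helper not shown in this patch; the sketch below assumes only the usual `strsep()` contract (return the next token, NUL-terminate at the separator, advance the cursor), and `split_once` plus the sample query are illustrative names, not netdata code.

```c
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for netdata's mystrsep(): consume up to the next
 * character from `seps`, NUL-terminate the token, advance the cursor. */
static char *split_once(char **str, const char *seps) {
    char *start = *str;
    if(!start) return NULL;

    char *end = strpbrk(start, seps);   /* first occurrence of any separator */
    if(end) { *end = '\0'; *str = end + 1; }
    else *str = NULL;                   /* last token: stop the caller's loop */

    return start;
}

int main(void) {
    /* With "&" as the only separator, a value containing '/' or '?'
     * survives intact; with "/?&" it would have been cut apart. */
    char query[] = "chart=system.cpu&label=a/b&value_color=grey";
    char *url = query, *value;

    while((value = split_once(&url, "&"))) {
        if(!*value) continue;
        char *name = split_once(&value, "=");
        printf("name=%s value=%s\n", name, value ? value : "(none)");
    }
    return 0;
}
```

Run against the sample query this prints three name/value pairs with `a/b` kept whole; under the old `"/?&"` set the same input would have produced a spurious `b` token.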
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
index 02e04abbf..ff711d7e1 100644
--- a/web/api/exporters/README.md
+++ b/web/api/exporters/README.md
@@ -1,3 +1,5 @@
 # Exporters
 
 TBD
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/exporters/allmetrics.c b/web/api/exporters/allmetrics.c
index a426db0cc..91bb0f921 100644
--- a/web/api/exporters/allmetrics.c
+++ b/web/api/exporters/allmetrics.c
@@ -24,7 +24,7 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
     const char *prometheus_prefix = global_backend_prefix;
 
     while(url) {
-        char *value = mystrsep(&url, "?&");
+        char *value = mystrsep(&url, "&");
         if (!value || !*value) continue;
 
         char *name = mystrsep(&value, "=");
diff --git a/web/api/exporters/prometheus/Makefile.in b/web/api/exporters/prometheus/Makefile.in
deleted file mode 100644
index 202f19b3d..000000000
--- a/web/api/exporters/prometheus/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
[... 464 lines of automake-generated Makefile.in boilerplate elided ...]
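The `allmetrics.c` hunk above applies the same separator change to `web_client_api_request_v1_allmetrics()`. In both handlers the follow-up `mystrsep(&value, "=")` performs the name/value split on each `&`-delimited token; a minimal sketch of just that step, using the BSD/glibc `strsep()` as a stand-in (again assuming only strsep()-like semantics for `mystrsep()`; the parameter shown is illustrative):

```c
#define _DEFAULT_SOURCE   /* strsep() is a BSD/glibc extension */
#include <stdio.h>
#include <string.h>

int main(void) {
    char param[] = "format=prometheus";  /* one '&'-delimited token (illustrative) */
    char *value = param;
    char *name = strsep(&value, "=");    /* name -> "format", value -> "prometheus" */

    /* A bare flag such as "help" has no '='; strsep() then leaves value NULL,
     * which is why both handlers must tolerate a missing value. */
    printf("name=%s value=%s\n", name, value ? value : "(none)");
    return 0;
}
```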
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
index 437f90c53..88e79ecd6 100644
--- a/web/api/exporters/prometheus/README.md
+++ b/web/api/exporters/prometheus/README.md
@@ -1,3 +1,5 @@
 # prometheus exporter
 
 The prometheus exporter for netdata is located at the [backends section for prometheus](../../../../backends/prometheus).
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/exporters/shell/Makefile.in b/web/api/exporters/shell/Makefile.in
deleted file mode 100644
index 40a3c2b49..000000000
--- a/web/api/exporters/shell/Makefile.in
+++ /dev/null
@@ -1,464 +0,0 @@
[... 464 lines of automake-generated Makefile.in boilerplate elided ...]
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
index 065afff5c..ab412ebaa 100644
--- a/web/api/exporters/shell/README.md
+++ b/web/api/exporters/shell/README.md
@@ -62,3 +62,5 @@ NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
 ```
 
 The value is rounded to the closest integer, since shell script cannot process decimal numbers.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fexporters%2Fshell%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/formatters/Makefile.in b/web/api/formatters/Makefile.in
deleted file mode 100644
index a2ea0e53c..000000000
--- a/web/api/formatters/Makefile.in
+++ /dev/null
@@ -1,651 +0,0 @@
[... 651 lines of automake-generated Makefile.in boilerplate elided ...]
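The context line quoted in the shell exporter README hunk above ("The value is rounded to the closest integer, since shell script cannot process decimal numbers.") is the whole contract of the `NETDATA_<CHART>_<DIMENSION>` variables: POSIX shell arithmetic is integer-only, so the exporter must round before emitting. A minimal sketch of that rounding step; the variable names and the sample value are illustrative, since the exporter's implementation is not part of this patch:

```c
#include <math.h>
#include <stdio.h>

int main(void) {
    /* Hypothetical collected sample; real values come from netdata's database. */
    double collected = 12.7;

    /* Round to the nearest integer, as the README describes, then print in
     * the NETDATA_${chart_id^^}_${dimension_id^^}="${value}" shape. */
    long long shell_value = llround(collected);
    printf("NETDATA_SYSTEM_CPU_USER=\"%lld\"\n", shell_value);
    return 0;
}
```

A shell consumer can then use the variable directly in integer tests, e.g. `[ "$NETDATA_SYSTEM_CPU_USER" -gt 90 ] && echo high` (illustrative).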
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/formatters -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; 
\ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ 
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - csv \ - json \ - ssv \ - value \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/formatters/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md index ad0df2d46..b4ce1e30d 100644 --- a/web/api/formatters/README.md +++ b/web/api/formatters/README.md @@ -70,3 +70,5 @@ For example, to download a CSV file with CPU utilization of the last hour, This is done by appending `&tqx=outFileName:FILENAME` to any data query. The output will be in the format given with `&format=`. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/formatters/csv/Makefile.in b/web/api/formatters/csv/Makefile.in deleted file mode 100644 index 9ba808443..000000000 --- a/web/api/formatters/csv/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/formatters/csv -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ 
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = 
@localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/formatters/csv/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md index 4711dcbd1..995e740b8 100644 --- a/web/api/formatters/csv/README.md +++ b/web/api/formatters/csv/README.md @@ -137,3 +137,5 @@ time|started 2018-10-27 03:45:00|205.001551 2018-10-27 03:44:00|7026.9852167 2018-10-27 03:43:00|205.9904794 + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fcsv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/formatters/json/Makefile.in b/web/api/formatters/json/Makefile.in deleted file mode 100644 index 5289347d0..000000000 --- a/web/api/formatters/json/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/formatters/json -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = 
$(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = 
@PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/json/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/formatters/json/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md index 4f0bad5ca..033bf8eb0 100644 --- a/web/api/formatters/json/README.md +++ b/web/api/formatters/json/README.md @@ -148,3 +148,5 @@ google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig: }}); ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/formatters/ssv/Makefile.in b/web/api/formatters/ssv/Makefile.in deleted file mode 100644 index c7c8c3fa9..000000000 --- a/web/api/formatters/ssv/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/formatters/ssv -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/formatters/ssv/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md index 74f0d3189..a2892999f 100644 --- a/web/api/formatters/ssv/README.md +++ b/web/api/formatters/ssv/README.md @@ -50,3 +50,5 @@ in a JSON array: # curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max' [278,258,268,239,259,260,243,266,278,318,264,258] ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fssv%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/formatters/value/Makefile.in b/web/api/formatters/value/Makefile.in deleted file mode 100644 index eb0b2bddf..000000000 --- a/web/api/formatters/value/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/formatters/value 
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ 
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/formatters/value/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/formatters/value/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md index 5024e74ee..50974de6d 100644 --- a/web/api/formatters/value/README.md +++ b/web/api/formatters/value/README.md @@ -15,3 +15,5 @@ option|supported|description The Value formatter is not exposed by the API by itself. Instead it is used by the [`ssv`](../ssv) formatter and [health monitoring queries](../../../../health). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fformatters%2Fvalue%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/health/Makefile.am b/web/api/health/Makefile.am new file mode 100644 index 000000000..19554bed8 --- /dev/null +++ b/web/api/health/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/health/README.md b/web/api/health/README.md new file mode 100644 index 000000000..2003a61e0 --- /dev/null +++ b/web/api/health/README.md @@ -0,0 +1,163 @@ +# Health API Calls + +## Health Read API + +### Enabled Alarms + +NetData enables alarms on demand, i.e. when the chart they should be linked to starts collecting data. So, although many more alarms are configured, only the useful ones are enabled. + +To get the list of all enabled alarms: + +`http://your.netdata.ip:19999/api/v1/alarms?all` + +### Raised Alarms + +This API call will return the alarms currently in WARNING or CRITICAL state. + +`http://your.netdata.ip:19999/api/v1/alarms` + +### Event Log + +The size of the alarm log is configured in `netdata.conf`. 
There are two settings: the rotation of the alarm log file and the in-memory size of the alarm log.
+
+```
+[health]
+    in memory max health log entries = 1000
+    rotate log every lines = 2000
+```
+
+The API call retrieves all entries of the alarm log:
+
+`http://your.netdata.ip:19999/api/v1/alarm_log`
+
+### Alarm Log Incremental Updates
+
+`http://your.netdata.ip:19999/api/v1/alarm_log?after=UNIQUEID`
+
+The above returns all the events in the alarm log that occurred after UNIQUEID (poll it once without `after=`, remember the last UNIQUEID of the returned set, and pass it back on the next call to retrieve only the newer events).
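+
+To automate this, a minimal polling loop could look like the sketch below. It is only an illustration: it assumes the agent listens on `localhost:19999` and that `jq` is installed; the fields used (`unique_id`, `when`, `chart`, `name`, `status`) are the ones listed for `alarm_log_entry` in the API swagger.
+
+```
+#!/bin/sh
+# Hypothetical sketch: print new alarm log events as they appear.
+last=0
+while true; do
+  # Ask only for events newer than the last UNIQUEID we have seen.
+  events=$(curl -Ss "http://localhost:19999/api/v1/alarm_log?after=${last}")
+  if [ "$(echo "${events}" | jq 'length')" -gt 0 ]; then
+    echo "${events}" | jq -r '.[] | "\(.when) \(.chart) \(.name) \(.status)"'
+    # Remember the highest unique_id returned, to continue incrementally.
+    last=$(echo "${events}" | jq 'map(.unique_id) | max')
+  fi
+  sleep 10
+done
+```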
+
+### Alarm badges
+
+The following will return an SVG badge of the alarm named `NAME`, attached to the chart named `CHART`.
+
+`http://your.netdata.ip:19999/api/v1/badge.svg?alarm=NAME&chart=CHART`
+
+## Health Management API
+
+Netdata v1.12 and beyond provides a command API to control health checks and notifications at runtime. The feature is especially useful for maintenance periods, during which you receive meaningless alarms.
+
+Specifically, the API allows you to:
+ - Disable health checks completely. Alarm conditions will not be evaluated at all and no entries will be added to the alarm log.
+ - Silence alarm notifications. Alarm conditions will be evaluated, the alarms will appear in the log and the netdata UI will show the alarms as active, but no notifications will be sent.
+ - Disable or Silence specific alarms that match selectors on alarm/template name, chart, context, host and family.
+
+The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://localhost:19999/netdata.conf`:
+
+```bash
+[registry]
+    # netdata management api key file = /var/lib/netdata/netdata.api.key
+```
+
+You can access the API via GET requests, by adding the bearer token in an `X-Auth-Token` HTTP header, like this:
+
+```
+curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
+
+The command `RESET` just returns netdata to the default operation, with all health checks and notifications enabled.
+If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`.
+
+### Disable or silence all alarms
+
+If all you need is to temporarily disable all health checks, issue the following before your maintenance period starts:
+```
+curl "http://myserver/api/v1/manage/health?cmd=DISABLE ALL" -H "X-Auth-Token: Mytoken"
+```
+The effect of disabling health checks is that the alarm criteria are not evaluated at all and nothing is written in the alarm log.
+If you want the health checks to keep running but not send any notifications during your maintenance period, you can instead use this:
+
+```
+curl "http://myserver/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: Mytoken"
+```
+
+Alarms may then still be raised and logged in netdata, so you'll be able to see them via the UI.
+
+Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the RESET command.
+
+```
+curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```
+
+### Disable or silence specific alarms
+
+If you do not wish to disable/silence all alarms, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used.
+Instead, the following commands expect that one or more alarm selectors will be added, so that only alarms that match the selectors are disabled or silenced.
+- `DISABLE` : Set the mode to disable health checks.
+- `SILENCE` : Set the mode to silence notifications.
+
+You will normally put one of these commands in the same request with your first alarm selector, but it's possible to issue them separately as well.
+You will get a warning in the response if a selector was added without a SILENCE/DISABLE command, or vice versa.
+
+Each request can specify a single alarm `selector`, with one or more `selection criteria`.
+A single alarm will match a `selector` if all selection criteria match the alarm.
+You can add as many selectors as you like.
+In essence, the rule is: IF (alarm matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command.
+
+To clear all selectors and reset the mode to default, use the `RESET` command.
+
+The following example silences notifications for all the alarms with context=load:
+
+```
+curl "http://myserver/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken"
+```
+
+#### Selection criteria
+
+The `selection criteria` are key/value pairs, in the format `key : value`, where value is a netdata [simple pattern](../../../libnetdata/simple_pattern/). This means that you can create very powerful selectors (you will rarely need more than one or two).
+
+The accepted keys for the `selection criteria` are the following:
+- `alarm` : The expression provided will match both `alarm` and `template` names.
+- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
+- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
+- `hosts` : The hostnames that will need to match.
+- `families` : The alarm families.
+
+You can add any of the selection criteria you need on the request, to ensure that only the alarms you are interested in are matched and disabled/silenced. For example, there is no reason to add `hosts: *` if you want the criteria to apply to alarms for all hosts.
+
+Example 1: Disable all health checks for context = `random`
+
+```
+http://localhost/api/v1/manage/health?cmd=DISABLE&context=random
+```
+
+Example 2: Silence all alarms and templates with name starting with `out_of` on host `myhost`
+
+```
+http://localhost/api/v1/manage/health?cmd=SILENCE&alarm=out_of*&hosts=myhost
+```
+
+Example 2.2: Add one more selector, to also silence alarms for cpu1 and cpu2
+
+```
+http://localhost/api/v1/manage/health?families=cpu1 cpu2
+```
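+
+Putting it together, the sequence of Examples 2 and 2.2 plus the final cleanup could look like the sketch below. This is only an illustration: `myserver` and `Mytoken` are placeholders, and the space in the `families` list is URL-encoded as `%20` so that curl sends a valid URL.
+
+```
+# Silence alarms and templates whose names start with out_of, on host myhost.
+curl "http://myserver/api/v1/manage/health?cmd=SILENCE&alarm=out_of*&hosts=myhost" -H "X-Auth-Token: Mytoken"
+
+# Add a second selector, so alarms of the cpu1 and cpu2 families are silenced too.
+curl "http://myserver/api/v1/manage/health?families=cpu1%20cpu2" -H "X-Auth-Token: Mytoken"
+
+# After the maintenance window, clear all selectors and re-enable everything.
+curl "http://myserver/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
+```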
+
+### Responses
+
+- "Auth Error" : Token authentication failed
+- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
+- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
+- "All health checks and notifications are enabled" : Successful response to cmd=RESET
+- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
+- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
+- "Alarm selector added" : Added to the response when a new selector is added
+- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
+- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
+- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
+
+### Further reading
+
+The test script under [tests/health_mgmtapi](../../../tests/health_mgmtapi) contains a series of tests that you can either run or read through to understand the various calls and responses better.
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fhealth%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c new file mode 100644 index 000000000..ec177751b --- /dev/null +++ b/web/api/health/health_cmdapi.c @@ -0,0 +1,166 @@ +// +// Created by christopher on 11/12/18. +// + +#include "health_cmdapi.h" + + +static SILENCER *create_silencer(void) { + SILENCER *t = callocz(1, sizeof(SILENCER)); + debug(D_HEALTH, "HEALTH command API: Created empty silencer"); + + return t; +} + +void free_silencers(SILENCER *t) { + if (!t) return; + if (t->next) free_silencers(t->next); + debug(D_HEALTH, "HEALTH command API: Freeing silencer %s:%s:%s:%s:%s", t->alarms, + t->charts, t->contexts, t->hosts, t->families); + simple_pattern_free(t->alarms_pattern); + simple_pattern_free(t->charts_pattern); + simple_pattern_free(t->contexts_pattern); + simple_pattern_free(t->hosts_pattern); + simple_pattern_free(t->families_pattern); + freez(t->alarms); + freez(t->charts); + freez(t->contexts); + freez(t->hosts); + freez(t->families); + freez(t); + return; +} + + + +int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url) { + int ret = 400; + (void) host; + + + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->contenttype = CT_TEXT_PLAIN; + + buffer_flush(w->response.data); + + static uint32_t + hash_alarm = 0, + hash_template = 0, + hash_chart = 0, + hash_context = 0, + hash_host = 0, + hash_families = 0; + + if (unlikely(!hash_alarm)) { + hash_alarm = simple_uhash(HEALTH_ALARM_KEY); + hash_template = simple_uhash(HEALTH_TEMPLATE_KEY); + hash_chart = simple_uhash(HEALTH_CHART_KEY); + hash_context = simple_uhash(HEALTH_CONTEXT_KEY); + hash_host = simple_uhash(HEALTH_HOST_KEY); + hash_families = simple_uhash(HEALTH_FAMILIES_KEY); + } + + SILENCER *silencer = NULL; + + if (!w->auth_bearer_token) { + buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR); + ret = 403; + } else { + debug(D_HEALTH, "HEALTH command API: Comparing secret '%s' to '%s'", w->auth_bearer_token, api_secret); + if (strcmp(w->auth_bearer_token, api_secret)) { + buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR); + ret = 403; + } else { + while (url) { + char *value = mystrsep(&url, "&"); + if (!value || !*value) continue; + + char *key = mystrsep(&value, "="); + if (!key || !*key) continue; + if (!value || !*value) continue; + + debug(D_WEB_CLIENT, "%llu: API v1 health query param '%s' with value '%s'", w->id, key, value); + + // name and value are now the parameters + if (!strcmp(key, "cmd")) { + if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCEALL)) { + silencers->all_alarms = 1; + silencers->stype = STYPE_SILENCE_NOTIFICATIONS; + buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCEALL); + } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLEALL)) { + silencers->all_alarms = 1; + silencers->stype = 
STYPE_DISABLE_ALARMS; + buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLEALL); + } else if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCE)) { + silencers->stype = STYPE_SILENCE_NOTIFICATIONS; + buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCE); + } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLE)) { + silencers->stype = STYPE_DISABLE_ALARMS; + buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLE); + } else if (!strcmp(value, HEALTH_CMDAPI_CMD_RESET)) { + silencers->all_alarms = 0; + silencers->stype = STYPE_NONE; + free_silencers(silencers->silencers); + silencers->silencers = NULL; + buffer_strcat(wb, HEALTH_CMDAPI_MSG_RESET); + } + } else { + uint32_t hash = simple_uhash(key); + if (unlikely(silencer == NULL)) { + if ( + (hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) || + (hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) || + (hash == hash_chart && !strcasecmp(key, HEALTH_CHART_KEY)) || + (hash == hash_context && !strcasecmp(key, HEALTH_CONTEXT_KEY)) || + (hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) || + (hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) + ) { + silencer = create_silencer(); + } + } + + if (hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) { + silencer->alarms = strdupz(value); + silencer->alarms_pattern = simple_pattern_create(silencer->alarms, NULL, SIMPLE_PATTERN_EXACT); + } else if (hash == hash_chart && !strcasecmp(key, HEALTH_CHART_KEY)) { + silencer->charts = strdupz(value); + silencer->charts_pattern = simple_pattern_create(silencer->charts, NULL, SIMPLE_PATTERN_EXACT); + } else if (hash == hash_context && !strcasecmp(key, HEALTH_CONTEXT_KEY)) { + silencer->contexts = strdupz(value); + silencer->contexts_pattern = simple_pattern_create(silencer->contexts, NULL, SIMPLE_PATTERN_EXACT); + } else if (hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) { + silencer->hosts = strdupz(value); + silencer->hosts_pattern = simple_pattern_create(silencer->hosts, NULL, SIMPLE_PATTERN_EXACT); + } else if (hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) { + silencer->families = strdupz(value); + silencer->families_pattern = simple_pattern_create(silencer->families, NULL, SIMPLE_PATTERN_EXACT); + } else { + buffer_strcat(wb, HEALTH_CMDAPI_MSG_INVALID_KEY); + } + } + + } + if (likely(silencer)) { + // Add the created instance to the linked list in silencers + silencer->next = silencers->silencers; + silencers->silencers = silencer; + debug(D_HEALTH, "HEALTH command API: Added silencer %s:%s:%s:%s:%s", silencer->alarms, + silencer->charts, silencer->contexts, silencer->hosts, silencer->families + ); + buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED); + if (silencers->stype == STYPE_NONE) { + buffer_strcat(wb, HEALTH_CMDAPI_MSG_STYPEWARNING); + } + } + if (unlikely(silencers->stype != STYPE_NONE && !silencers->all_alarms && !silencers->silencers)) { + buffer_strcat(wb, HEALTH_CMDAPI_MSG_NOSELECTORWARNING); + } + ret = 200; + } + } + w->response.data = wb; + buffer_no_cacheable(w->response.data); + return ret; +} diff --git a/web/api/health/health_cmdapi.h b/web/api/health/health_cmdapi.h new file mode 100644 index 000000000..d0f30401c --- /dev/null +++ b/web/api/health/health_cmdapi.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_HEALTH_SVG_H +#define NETDATA_WEB_HEALTH_SVG_H 1 + +#include "libnetdata/libnetdata.h" +#include "web/server/web_client.h" +#include "health/health.h" + +#define HEALTH_CMDAPI_CMD_SILENCEALL "SILENCE ALL" +#define HEALTH_CMDAPI_CMD_DISABLEALL "DISABLE ALL" +#define 
HEALTH_CMDAPI_CMD_SILENCE "SILENCE" +#define HEALTH_CMDAPI_CMD_DISABLE "DISABLE" +#define HEALTH_CMDAPI_CMD_RESET "RESET" + +#define HEALTH_CMDAPI_MSG_AUTHERROR "Auth Error\n" +#define HEALTH_CMDAPI_MSG_SILENCEALL "All alarm notifications are silenced\n" +#define HEALTH_CMDAPI_MSG_DISABLEALL "All health checks are disabled\n" +#define HEALTH_CMDAPI_MSG_RESET "All health checks and notifications are enabled\n" +#define HEALTH_CMDAPI_MSG_DISABLE "Health checks disabled for alarms matching the selectors\n" +#define HEALTH_CMDAPI_MSG_SILENCE "Alarm notifications silenced for alarms matching the selectors\n" +#define HEALTH_CMDAPI_MSG_ADDED "Alarm selector added\n" +#define HEALTH_CMDAPI_MSG_INVALID_KEY "Invalid key. Ignoring it.\n" +#define HEALTH_CMDAPI_MSG_STYPEWARNING "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command.\n" +#define HEALTH_CMDAPI_MSG_NOSELECTORWARNING "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.\n" + +extern int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url); + +#include "web/api/web_api_v1.h" + +#endif /* NETDATA_WEB_HEALTH_SVG_H */ diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json index 8ee1a8a72..ac84b754d 100644 --- a/web/api/netdata-swagger.json +++ b/web/api/netdata-swagger.json @@ -3,7 +3,7 @@ "info": { "title": "NetData API", "description": "Real-time performance and health monitoring.", - "version": "1.9.11_rolling" + "version": "1.11.1_rolling" }, "host": "registry.my-netdata.io", "schemes": [ @@ -15,6 +15,20 @@ "application/json" ], "paths": { + "/info": { + "get": { + "summary": "Get netdata basic information", + "description": "The info endpoint returns basic information about netdata. It provides:\n* netdata version\n* netdata unique id\n* list of hosts mirrored (includes itself)\n* number of alarms in the host\n * number of alarms in normal state\n * number of alarms in warning state\n * number of alarms in critical state\n", + "responses": { + "200": { + "description": "netdata basic information", + "schema": { + "$ref": "#/definitions/info" + } + } + } + } + }, "/charts": { "get": { "summary": "Get a list of all charts available at the server", @@ -531,9 +545,163 @@ } } } + }, + "/alarms": { + "get": { + "summary": "Get a list of active or raised alarms on the server", + "description": "The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned.", + "parameters": [ + { + "name": "all", + "in": "query", + "description": "If passed, all enabled alarms are returned", + "required": false, + "type": "boolean", + "allowEmptyValue": true + } + ], + "responses": { + "200": { + "description": "An object containing general info and a linked list of alarms", + "schema": { + "$ref": "#/definitions/alarms" + } + } + } + } + }, + "/alarm_log": { + "get": { + "summary": "Retrieves the entries of the alarm log", + "description": "Returns an array of alarm_log entries, with historical information on raised and cleared alarms.", + "parameters": [ + { + "name": "after", + "in": "query", + "description": "Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. 
An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and pass it back to incrementally retrieve the next events", + "required": false, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "An array of alarm log entries", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/alarm_log_entry" + } + } + } + } + } + }, + "/manage/health": { + "get": { + "summary": "Accesses the health management API to control health checks and notifications at runtime.", + "description": "Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.", + "parameters": [ + { + "name": "cmd", + "in": "query", + "description": "DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors.", + "required": false, + "type": "string", + "enum": [ + "DISABLE ALL", + "SILENCE ALL", + "DISABLE", + "SILENCE", + "RESET" + ] + }, + { + "name": "alarm", + "in": "query", + "description": "The expression provided will match both `alarm` and `template` names.", + "type": "string" + }, + { + "name": "chart", + "in": "query", + "description": "Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`", + "type": "string" + }, + { + "name": "context", + "in": "query", + "description": "Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.", + "type": "string" + }, + { + "name": "hosts", + "in": "query", + "description": "The hostnames that will need to match.", + "type": "string" + }, + { + "name": "families", + "in": "query", + "description": "The alarm families.", + "type": "string" + } + ], + "responses": { + "200": { + "description": "A plain text response based on the result of the command" + }, + "403": { + "description": "Bearer authentication error." + } + } + } } }, "definitions": { + "info": { + "type": "object", + "properties": { + "version": { + "type": "string", + "description": "netdata version of the server.", + "example": "1.11.1_rolling" + }, + "uid": { + "type": "string", + "description": "netdata unique id of the server.", + "example": "24e9fe3c-f2ac-11e8-bafc-0242ac110002" + }, + "mirrored_hosts": { + "type": "array", + "description": "list of hosts mirrored by the server (includes itself).", + "items": { + "type": "string" + }, + "example": [ + "host1.example.com", + "host2.example.com" + ] + }, + "alarms": { + "type": "object", + "description": "number of alarms on the server.", + "properties": { + "normal": { + "type": "integer", + "description": "number of alarms in normal state." + }, + "warning": { + "type": "integer", + "description": "number of alarms in warning state." + }, + "critical": { + "type": "integer", + "description": "number of alarms in critical state." 
+ } + } + } + } + }, "chart_summary": { "type": "object", "properties": { @@ -772,6 +940,291 @@ "description": "The result requested, in the format requested." } } + }, + "alarms": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "latest_alarm_log_unique_id": { + "type": "integer", + "format": "int32" + }, + "status": { + "type": "boolean" + }, + "now": { + "type": "integer", + "format": "int32" + }, + "alarms": { + "type": "object", + "properties": { + "chart-name.alarm-name": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int32" + }, + "name": { + "type": "string", + "description": "Full alarm name" + }, + "chart": { + "type": "string" + }, + "family": { + "type": "string" + }, + "active": { + "type": "boolean", + "description": "Will be false only if the alarm is disabled in the configuration" + }, + "disabled": { + "type": "boolean", + "description": "Whether the health check for this alarm has been disabled via a health command API DISABLE command." + }, + "silenced": { + "type": "boolean", + "description": "Whether notifications for this alarm have been silenced via a health command API SILENCE command." + }, + "exec": { + "type": "string" + }, + "recipient": { + "type": "string" + }, + "source": { + "type": "string" + }, + "units": { + "type": "string" + }, + "info": { + "type": "string" + }, + "status": { + "type": "string" + }, + "last_status_change": { + "type": "integer", + "format": "int32" + }, + "last_updated": { + "type": "integer", + "format": "int32" + }, + "next_update": { + "type": "integer", + "format": "int32" + }, + "update_every": { + "type": "integer", + "format": "int32" + }, + "delay_up_duration": { + "type": "integer", + "format": "int32" + }, + "delay_down_duration": { + "type": "integer", + "format": "int32" + }, + "delay_max_duration": { + "type": "integer", + "format": "int32" + }, + "delay_multiplier": { + "type": "integer", + "format": "int32" + }, + "delay": { + "type": "integer", + "format": "int32" + }, + "delay_up_to_timestamp": { + "type": "integer", + "format": "int32" + }, + "value_string": { + "type": "string" + }, + "no_clear_notification": { + "type": "boolean" + }, + "lookup_dimensions": { + "type": "string" + }, + "db_after": { + "type": "integer", + "format": "int32" + }, + "db_before": { + "type": "integer", + "format": "int32" + }, + "lookup_method": { + "type": "string" + }, + "lookup_after": { + "type": "integer", + "format": "int32" + }, + "lookup_before": { + "type": "integer", + "format": "int32" + }, + "lookup_options": { + "type": "string" + }, + "calc": { + "type": "string" + }, + "calc_parsed": { + "type": "string" + }, + "warn": { + "type": "string" + }, + "warn_parsed": { + "type": "string" + }, + "crit": { + "type": "string" + }, + "crit_parsed": { + "type": "string" + }, + "green": { + "type": "string", + "format": "nullable" + }, + "red": { + "type": "string", + "format": "nullable" + }, + "value": { + "type": "number" + } + } + } + } + } + } + }, + "alarm_log_entry": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "unique_id": { + "type": "integer", + "format": "int32" + }, + "alarm_id": { + "type": "integer", + "format": "int32" + }, + "alarm_event_id": { + "type": "integer", + "format": "int32" + }, + "name": { + "type": "string" + }, + "chart": { + "type": "string" + }, + "family": { + "type": "string" + }, + "processed": { + "type": "boolean" + }, + "updated": { + "type": "boolean" + }, + "exec_run": { + "type": 
"integer", + "format": "int32" + }, + "exec_failed": { + "type": "boolean" + }, + "exec": { + "type": "string" + }, + "recipient": { + "type": "string" + }, + "exec_code": { + "type": "integer", + "format": "int32" + }, + "source": { + "type": "string" + }, + "units": { + "type": "string" + }, + "when": { + "type": "integer", + "format": "int32" + }, + "duration": { + "type": "integer", + "format": "int32" + }, + "non_clear_duration": { + "type": "integer", + "format": "int32" + }, + "status": { + "type": "string" + }, + "old_status": { + "type": "string" + }, + "delay": { + "type": "integer", + "format": "int32" + }, + "delay_up_to_timestamp": { + "type": "integer", + "format": "int32" + }, + "updated_by_id": { + "type": "integer", + "format": "int32" + }, + "updates_id": { + "type": "integer", + "format": "int32" + }, + "value_string": { + "type": "string" + }, + "old_value_string": { + "type": "string" + }, + "silenced": { + "type": "string" + }, + "info": { + "type": "string" + }, + "value": { + "type": "string", + "format": "nullable" + }, + "old_value": { + "type": "string", + "format": "nullable" + } + } } } } \ No newline at end of file diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml index 58f19198f..bf62fb95a 100644 --- a/web/api/netdata-swagger.yaml +++ b/web/api/netdata-swagger.yaml @@ -3,7 +3,7 @@ swagger: '2.0' info: title: NetData API description: 'Real-time performance and health monitoring.' - version: 1.11.0_rolling + version: 1.11.1_rolling host: registry.my-netdata.io schemes: - https @@ -12,6 +12,23 @@ basePath: /api/v1 produces: - application/json paths: + /info: + get: + summary: Get netdata basic information + description: | + The info endpoint returns basic information about netdata. It provides: + * netdata version + * netdata unique id + * list of hosts mirrored (includes itself) + * number of alarms in the host + * number of alarms in normal state + * number of alarms in warning state + * number of alarms in critical state + responses: + '200': + description: netdata basic information + schema: + $ref: '#/definitions/info' /charts: get: summary: 'Get a list of all charts available at the server' @@ -340,7 +357,108 @@ paths: description: 'All the metrics returned in the format requested' '400': description: 'The format requested is not supported' + /alarms: + get: + summary: 'Get a list of active or raised alarms on the server' + description: 'The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing "?all", all the enabled alarms are returned.' + parameters: + - name: all + in: query + description: 'If passed, all enabled alarms are returned' + required: false + type: boolean + allowEmptyValue: true + responses: + '200': + description: 'An object containing general info and a linked list of alarms' + schema: + $ref: '#/definitions/alarms' + /alarm_log: + get: + summary: 'Retrieves the entries of the alarm log' + description: 'Returns an array of alarm_log entries, with historical information on raised and cleared alarms.' + parameters: + - name: after + in: query + description: 'Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. 
An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and pass it back to incrementally retrieve the next events' + required: false + type: integer + responses: + '200': + description: 'An array of alarm log entries' + schema: + type: array + items: + $ref: '#/definitions/alarm_log_entry' + /manage/health: + get: + summary: 'Accesses the health management API to control health checks and notifications at runtime.' + description: 'Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.' + parameters: + - name: cmd + in: query + description: 'DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors.' + required: false + type: string + enum: ['DISABLE ALL', 'SILENCE ALL', 'DISABLE', 'SILENCE', 'RESET'] + - name: alarm + in: query + description: 'The expression provided will match both `alarm` and `template` names.' + type: string + - name: chart + in: query + description: 'Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`' + type: string + - name: context + in: query + description: 'Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.' + type: string + - name: hosts + in: query + description: 'The hostnames that will need to match.' + type: string + - name: families + in: query + description: 'The alarm families.' + type: string + responses: + '200': + description: 'A plain text response based on the result of the command' + '403': + description: 'Bearer authentication error.' definitions: + info: + type: object + properties: + version: + type: string + description: netdata version of the server. + example: 1.11.1_rolling + uid: + type: string + description: netdata unique id of the server. + example: 24e9fe3c-f2ac-11e8-bafc-0242ac110002 + mirrored_hosts: + type: array + description: list of hosts mirrored by the server (includes itself). + items: + type: string + example: + - host1.example.com + - host2.example.com + alarms: + type: object + description: number of alarms on the server. + properties: + normal: + type: integer + description: number of alarms in normal state. + warning: + type: integer + description: number of alarms in warning state. + critical: + type: integer + description: number of alarms in critical state. chart_summary: type: object properties: @@ -365,7 +483,7 @@ definitions: description: 'An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.' properties: key: - $ref: '#/definitions/chart' + $ref: '#/definitions/chart' charts_count: type: number description: 'The number of charts.' @@ -429,7 +547,7 @@ definitions: description: 'An object containing all the chart dimensions available for the chart. This is used as an indexed array. 
The key of the object the id of the dimension.' properties: key: - $ref: '#/definitions/dimension' + $ref: '#/definitions/dimension' green: type: number description: 'Chart health green threshold' @@ -442,7 +560,6 @@ definitions: name: type: string description: 'The name of the dimension' - json_wrap: type: object properties: @@ -510,3 +627,204 @@ definitions: description: 'The format of the result returned.' result: description: 'The result requested, in the format requested.' + alarms: + type: object + properties: + hostname: + type: string + latest_alarm_log_unique_id: + type: integer + format: int32 + status: + type: boolean + now: + type: integer + format: int32 + alarms: + type: object + properties: + chart-name.alarm-name: + type: object + properties: + id: + type: integer + format: int32 + name: + type: string + description: Full alarm name + chart: + type: string + family: + type: string + active: + type: boolean + description: Will be false only if the alarm is disabled in the configuration + disabled: + type: boolean + description: Whether the health check for this alarm has been disabled via a health command API DISABLE command. + silenced: + type: boolean + description: Whether notifications for this alarm have been silenced via a health command API SILENCE command. + exec: + type: string + recipient: + type: string + source: + type: string + units: + type: string + info: + type: string + status: + type: string + last_status_change: + type: integer + format: int32 + last_updated: + type: integer + format: int32 + next_update: + type: integer + format: int32 + update_every: + type: integer + format: int32 + delay_up_duration: + type: integer + format: int32 + delay_down_duration: + type: integer + format: int32 + delay_max_duration: + type: integer + format: int32 + delay_multiplier: + type: integer + format: int32 + delay: + type: integer + format: int32 + delay_up_to_timestamp: + type: integer + format: int32 + value_string: + type: string + no_clear_notification: + type: boolean + lookup_dimensions: + type: string + db_after: + type: integer + format: int32 + db_before: + type: integer + format: int32 + lookup_method: + type: string + lookup_after: + type: integer + format: int32 + lookup_before: + type: integer + format: int32 + lookup_options: + type: string + calc: + type: string + calc_parsed: + type: string + warn: + type: string + warn_parsed: + type: string + crit: + type: string + crit_parsed: + type: string + green: + type: string + format: nullable + red: + type: string + format: nullable + value: + type: number + alarm_log_entry: + type: object + properties: + hostname: + type: string + unique_id: + type: integer + format: int32 + alarm_id: + type: integer + format: int32 + alarm_event_id: + type: integer + format: int32 + name: + type: string + chart: + type: string + family: + type: string + processed: + type: boolean + updated: + type: boolean + exec_run: + type: integer + format: int32 + exec_failed: + type: boolean + exec: + type: string + recipient: + type: string + exec_code: + type: integer + format: int32 + source: + type: string + units: + type: string + when: + type: integer + format: int32 + duration: + type: integer + format: int32 + non_clear_duration: + type: integer + format: int32 + status: + type: string + old_status: + type: string + delay: + type: integer + format: int32 + delay_up_to_timestamp: + type: integer + format: int32 + updated_by_id: + type: integer + format: int32 + updates_id: + type: integer + format: int32 + value_string: + type: 
string + old_value_string: + type: string + silenced: + type: string + info: + type: string + value: + type: string + format: nullable + old_value: + type: string + format: nullable diff --git a/web/api/queries/Makefile.in b/web/api/queries/Makefile.in deleted file mode 100644 index 295870d2c..000000000 --- a/web/api/queries/Makefile.in +++ /dev/null @@ -1,656 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 
$(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. 
-am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - average \ - des \ - incremental_sum \ - max \ - min \ - sum \ - median \ - ses \ - stddev \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. 
-# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/README.md b/web/api/queries/README.md index 7a517d26e..6a55398ac 100644 --- a/web/api/queries/README.md +++ b/web/api/queries/README.md @@ -65,12 +65,12 @@ There are 2 uses that enable this feature: Using `points` and `gtime` the query engine tries to find a best fit for **database-points** vs **result-points** (we call this ratio `group points`). It always tries to keep `group points` -an integer. Keep in mind the query engine may shift `after` if required. +an integer. Keep in mind the query engine may shift `after` if required. See also the [example](#example). #### Time-frame Alignment Alignment is a very important aspect of netdata queries. Without it, the animated -charts on the dashboards would constantly change shape during incremental updates. +charts on the dashboards would constantly [change shape](#example) during incremental updates. To provide consistent grouping through time, the query engine (by default) aligns `after` and `before` to be a multiple of `group points`. @@ -126,3 +126,47 @@ to the caller. The query engine is highly optimized for speed. Most of its modules implement "online" versions of the algorithms, requiring just one pass on the database values to produce the result. + +## Example + +When netdata is reducing metrics, it always tries to return the same boundaries. So, if we want 10s averages, it will always return points starting at `unix timestamp % 10 = 0`. + +Let's see why this is needed by looking at the error case, first with a short simulation and then with concrete numbers.
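+
+The following minimal C sketch simulates this error case. It is an illustration only: the helpers `avg_range` and `query` are invented for this sketch and are not the query engine's actual code. It stores five 1-second samples, groups them in pairs, and issues the same 2-point query one second apart, first without alignment and then with the window end snapped to a multiple of `group`:
+
+```c
+#include <stdio.h>
+
+/* Five 1-second samples: value == timestamp (00:01 .. 00:05). */
+static const int    db_time[] = {1, 2, 3, 4, 5};
+static const double db_val[]  = {1, 2, 3, 4, 5};
+#define DB_LEN 5
+
+/* Average of the samples falling in the window (after, before]. */
+static double avg_range(int after, int before) {
+    double sum = 0.0;
+    int n = 0;
+    for (int i = 0; i < DB_LEN; i++)
+        if (db_time[i] > after && db_time[i] <= before) {
+            sum += db_val[i];
+            n++;
+        }
+    return n ? sum / n : 0.0;
+}
+
+/* Print 2 grouped points ending at 'before', optionally aligned. */
+static void query(int before, int group, int align) {
+    printf("asked until 00:%02d, %saligned:\n", before, align ? "" : "un");
+    if (align)
+        before -= before % group;  /* snap the end to a multiple of group */
+    for (int start = before - 2 * group; start < before; start += group)
+        printf("  00:%02d - 00:%02d -> %.1f\n",
+               start + 1, start + group, avg_range(start, start + group));
+}
+
+int main(void) {
+    query(4, 2, 0);  /* unaligned at 00:04: returns 1.5 and 3.5 */
+    query(5, 2, 0);  /* unaligned at 00:05: returns 2.5 and 4.5 - shifted! */
+    query(4, 2, 1);  /* aligned: both queries snap to the same windows, */
+    query(5, 2, 1);  /* so they both return 1.5 and 3.5 */
+    return 0;
+}
+```
+
+The two unaligned runs reproduce the shifting values in the tables below, while the two aligned runs agree with each other.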
+ +Assume we have 5 points: + +| time | value | +| :-: | :-: | +| 00:01 | 1 | +| 00:02 | 2 | +| 00:03 | 3 | +| 00:04 | 4 | +| 00:05 | 5 | + +At 00:04 you ask for 2 points covering the past 4 seconds, so `group = 2`. netdata would return: + +| point | time | value | +| :-: | :-: | :-: | +| 1 | 00:01 - 00:02 | 1.5 | +| 2 | 00:03 - 00:04 | 3.5 | + +A second later, the chart refreshes and makes the same request again, now at 00:05. These are the points that would be returned: + +| point | time | value | +| :-: | :-: | :-: | +| 1 | 00:02 - 00:03 | 2.5 | +| 2 | 00:04 - 00:05 | 4.5 | + +**Wait a moment!** The chart shifted by just one point, yet the values changed! Point 2 was 3.5, but after shifting to position 1 it became 2.5! If you see this in a chart, it's a mess: the chart constantly changes shape. + +For this reason, netdata always aligns the data it returns to the `group`. + +When you request `points=1`, netdata understands that you need 1 point for the whole database, so `group = 3600`. It then looks for a starting point at `timestamp % 3600 = 0`; within a database of 3600 seconds there is exactly one such point. Next, it tries to average 3600 points from there, but most of the time it will not find all 3600 of them in the database; only for 1 second out of every 3600 will this query return anything. + +So, the proper way to query the database is to also set at least `after`. The following call returns 1 point for the last complete 10-second duration (it starts at `timestamp % 10 = 0`): + +http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds + +When you keep calling this URL, you will see that it returns one new value every 10 seconds, and the timestamp always ends with zero. Similarly, if you use `points=1&after=-5`, it will always return timestamps ending in 0 or 5. + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/average/Makefile.in b/web/api/queries/average/Makefile.in deleted file mode 100644 index a5db03648..000000000 --- a/web/api/queries/average/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?)
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/average -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/average/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/average/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md index 3be434f68..873b60a5f 100644 --- a/web/api/queries/average/README.md +++ b/web/api/queries/average/README.md @@ -37,3 +37,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Average](https://en.wikipedia.org/wiki/Average). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Faverage%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/des/Makefile.in b/web/api/queries/des/Makefile.in deleted file mode 100644 index d3f05587b..000000000 --- a/web/api/queries/des/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/des -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - 
$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/des/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/des/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md index fa546a338..486221cbd 100644 --- a/web/api/queries/des/README.md +++ b/web/api/queries/des/README.md @@ -64,3 +64,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fdes%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/incremental_sum/Makefile.in b/web/api/queries/incremental_sum/Makefile.in deleted file mode 100644 index 1863466fe..000000000 --- a/web/api/queries/incremental_sum/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/incremental_sum -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = 
@CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/incremental_sum/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md index ef86d4d41..47f833f38 100644 --- a/web/api/queries/incremental_sum/README.md +++ b/web/api/queries/incremental_sum/README.md @@ -32,3 +32,5 @@ Examining last 1 minute `successful` web server responses: ## References - none + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fincremental_sum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/max/Makefile.in b/web/api/queries/max/Makefile.in deleted file mode 100644 index f8475ad62..000000000 --- a/web/api/queries/max/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/max -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = 
$(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = 
@PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/max/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/max/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md index 2ea14bf0b..34dff0215 100644 --- a/web/api/queries/max/README.md +++ b/web/api/queries/max/README.md @@ -29,3 +29,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmax%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/median/Makefile.in b/web/api/queries/median/Makefile.in deleted file mode 100644 index f6d471bdf..000000000 --- a/web/api/queries/median/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/median -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/median/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/median/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md index eb2dbaa96..72f51a244 100644 --- a/web/api/queries/median/README.md +++ b/web/api/queries/median/README.md @@ -35,3 +35,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Median](https://en.wikipedia.org/wiki/Median). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmedian%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/min/Makefile.in b/web/api/queries/min/Makefile.in deleted file mode 100644 index 11fa615f6..000000000 --- a/web/api/queries/min/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/min -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - 
$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/min/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/min/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md index 3fe13106a..28f6dbf53 100644 --- a/web/api/queries/min/README.md +++ b/web/api/queries/min/README.md @@ -29,3 +29,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Sample_maximum_and_minimum](https://en.wikipedia.org/wiki/Sample_maximum_and_minimum). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fmin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/query.c b/web/api/queries/query.c index d03b43d3c..f841b65d5 100644 --- a/web/api/queries/query.c +++ b/web/api/queries/query.c @@ -492,7 +492,7 @@ static inline void do_dimension( r->min = min; r->max = max; r->before = max_date; - r->after = min_date; + r->after = min_date - (r->group - 1) * r->st->update_every; rrdr_done(r, rrdr_line); #ifdef NETDATA_INTERNAL_CHECKS @@ -539,7 +539,7 @@ static void rrd2rrdr_log_request_response_metdata(RRDR *r , resampling_group // after - , (size_t)r->after - (group - 1) * r->st->update_every + , (size_t)r->after , (size_t)after_wanted , (size_t)after_requested , (size_t)rrdset_first_entry_t(r->st) @@ -666,26 +666,41 @@ RRDR *rrd2rrdr( info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration); #endif - group = available_points; // use all the points + after_requested = before_requested - resampling_time_requested; + duration = before_requested - after_requested; + available_points = duration / st->update_every; + group = available_points / points_requested; } - else { - // the points we should group to satisfy gtime - resampling_group = resampling_time_requested / st->update_every; - if(unlikely(resampling_time_requested % st->update_every)) { - #ifdef NETDATA_INTERNAL_CHECKS - info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, st->update_every); - #endif - resampling_group++; + // if the duration is not aligned to resampling time + // extend the duration to the past, to avoid a gap at the chart + // only when the missing duration is above 1/10th of a point + if(duration % resampling_time_requested) { + time_t delta = duration % resampling_time_requested; + if(delta > resampling_time_requested / 10) { + after_requested -= resampling_time_requested - delta; + duration = before_requested - after_requested; + available_points = duration / st->update_every; + group = available_points / points_requested; } + } - // adapt group according to resampling_group - if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one - if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group + // the points we should group to satisfy gtime + resampling_group = resampling_time_requested / st->update_every; + if(unlikely(resampling_time_requested % st->update_every)) { + #ifdef NETDATA_INTERNAL_CHECKS + info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, st->update_every); + #endif - //resampling_divisor = group / resampling_group; - resampling_divisor = (calculated_number)(group * st->update_every) / (calculated_number)resampling_time_requested; + resampling_group++; } + + // adapt group according to resampling_group + if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one + if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make 
sure group is multiple of resampling_group + + //resampling_divisor = group / resampling_group; + resampling_divisor = (calculated_number)(group * st->update_every) / (calculated_number)resampling_time_requested; } // now that we have group, @@ -710,7 +725,7 @@ RRDR *rrd2rrdr( // we need to estimate the number of points, for having // an integer number of values per point - long points_wanted = (before_wanted - after_requested) / st->update_every / group; + long points_wanted = (before_wanted - after_requested) / (st->update_every * group); time_t after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every; if(unlikely(after_wanted < first_entry_t)) { @@ -951,7 +966,7 @@ RRDR *rrd2rrdr( rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'before' is not wanted 'before'"); // reported 'after' varies, depending on group - if((r->after - (group - 1) * r->st->update_every) != after_wanted) + if(r->after != after_wanted) rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'after' is not wanted 'after'"); #endif diff --git a/web/api/queries/ses/Makefile.in b/web/api/queries/ses/Makefile.in deleted file mode 100644 index bb538fac3..000000000 --- a/web/api/queries/ses/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
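

Note on the `web/api/queries/query.c` hunks above: they rework how `rrd2rrdr()` sizes its grouping when a resampling time (`gtime`) is requested. Instead of falling back to "use all the points", the requested window is now extended into the past until its duration aligns with the resampling time (whenever the misalignment exceeds 1/10th of a resampled point), and the reported `after` now already carries the `(group - 1) * update_every` correction instead of applying it at logging time. Below is a minimal standalone sketch of that alignment arithmetic with illustrative inputs (a 1-second chart, a 595-second window, 10 points, 60-second `gtime`); the real function operates on `RRDSET` state and many more options:

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    time_t update_every = 1;        /* chart data collection frequency (secs) */
    time_t gtime        = 60;       /* requested resampling time (secs)       */
    long   points       = 10;       /* points requested by the caller         */
    time_t before = 1000000;
    time_t after  = before - 595;   /* a window not aligned to gtime          */

    time_t duration = before - after;

    /* extend the window into the past when it is not aligned to gtime
     * and the missing part exceeds 1/10th of a resampled point */
    time_t delta = duration % gtime;
    if(delta && delta > gtime / 10) {
        after   -= gtime - delta;
        duration = before - after;
    }

    long available_points = duration / update_every;
    long group            = available_points / points;
    long resampling_group = gtime / update_every;    /* points per gtime */
    if(gtime % update_every) resampling_group++;     /* round up         */

    /* never group below, and always at a multiple of, resampling_group */
    if(group < resampling_group) group = resampling_group;
    if(group % resampling_group) group += resampling_group - (group % resampling_group);

    double resampling_divisor = (double)(group * update_every) / (double)gtime;

    printf("duration=%ld group=%ld resampling_group=%ld divisor=%g\n",
           (long)duration, group, resampling_group, resampling_divisor);
    return 0;
}
```

With these inputs the window grows by 5 seconds to 600, so `group` lands on exactly 60 collected points per output point and the resampling divisor is 1, i.e. each output point covers precisely one `gtime` interval with no gap at the start of the chart.
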
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/ses -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/ses/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/ses/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md index b5dacea83..16b153a42 100644 --- a/web/api/queries/ses/README.md +++ b/web/api/queries/ses/README.md @@ -52,3 +52,5 @@ Examining last 1 minute `successful` web server responses: - [https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average](https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average) - [https://en.wikipedia.org/wiki/Exponential_smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fses%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/stddev/Makefile.in b/web/api/queries/stddev/Makefile.in deleted file mode 100644 index b7ccdfdca..000000000 --- a/web/api/queries/stddev/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/stddev -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - 
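

Note on the `ses` README above: it closes with references on exponential smoothing. For orientation, the grouping is built on the classic single exponential smoothing recurrence, s_t = α·x_t + (1 − α)·s_(t−1). A tiny sketch follows; the α of 0.3 is purely illustrative — the smoothing factor netdata derives per query is not shown in this hunk:

```c
#include <stdio.h>

int main(void) {
    double samples[] = { 10.0, 12.0, 11.0, 40.0, 13.0, 12.0 };
    double alpha = 0.3;            /* illustrative smoothing factor */
    double s = samples[0];         /* seed with the first sample    */

    for(int i = 1; i < 6; i++) {
        /* s_t = alpha * x_t + (1 - alpha) * s_(t-1) */
        s = alpha * samples[i] + (1.0 - alpha) * s;
        printf("x=%5.1f smoothed=%6.2f\n", samples[i], s);
    }
    return 0;
}
```

The spike to 40 is damped in the smoothed series, which is what makes this grouping useful on noisy charts.
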
$(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/stddev/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md index 3436ff834..2ef5a2e3b 100644 --- a/web/api/queries/stddev/README.md +++ b/web/api/queries/stddev/README.md @@ -85,3 +85,5 @@ Examining last 1 minute `successful` web server responses: ## References Check [https://en.wikipedia.org/wiki/Coefficient_of_variation](https://en.wikipedia.org/wiki/Coefficient_of_variation). + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fstddev%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/queries/sum/Makefile.in b/web/api/queries/sum/Makefile.in deleted file mode 100644 index 56dfb2d75..000000000 --- a/web/api/queries/sum/Makefile.in +++ /dev/null @@ -1,464 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/api/queries/sum -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ 
-CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = 
@program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/api/queries/sum/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/api/queries/sum/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." - -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md index be6596080..a74ffff69 100644 --- a/web/api/queries/sum/README.md +++ b/web/api/queries/sum/README.md @@ -32,3 +32,5 @@ Examining last 1 minute `successful` web server responses: ## References - [https://en.wikipedia.org/wiki/Summation](https://en.wikipedia.org/wiki/Summation). 
+ +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fapi%2Fqueries%2Fsum%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c index 5c54d52fc..991a9ec8b 100644 --- a/web/api/web_api_v1.c +++ b/web/api/web_api_v1.c @@ -83,6 +83,68 @@ void web_client_api_v1_init(void) { api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name); web_client_api_v1_init_grouping(); + + uuid_t uuid; + + // generate + uuid_generate(uuid); + + // unparse (to string) + char uuid_str[37]; + uuid_unparse_lower(uuid, uuid_str); +} + +char *get_mgmt_api_key(void) { + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); + char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); + static char guid[GUID_LEN + 1] = ""; + + if(likely(guid[0])) + return guid; + + // read it from disk + int fd = open(api_key_filename, O_RDONLY); + if(fd != -1) { + char buf[GUID_LEN + 1]; + if(read(fd, buf, GUID_LEN) != GUID_LEN) + error("Failed to read management API key from '%s'", api_key_filename); + else { + buf[GUID_LEN] = '\0'; + if(regenerate_guid(buf, guid) == -1) { + error("Failed to validate management API key '%s' from '%s'.", + buf, api_key_filename); + + guid[0] = '\0'; + } + } + close(fd); + } + + // generate a new one? + if(!guid[0]) { + uuid_t uuid; + + uuid_generate_time(uuid); + uuid_unparse_lower(uuid, guid); + guid[GUID_LEN] = '\0'; + + // save it + fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC, 444); + if(fd == -1) + fatal("Cannot create unique management API key file '%s'. Please fix this.", api_key_filename); + + if(write(fd, guid, GUID_LEN) != GUID_LEN) + fatal("Cannot write the unique management API key file '%s'. 
Please fix this.", api_key_filename); + + close(fd); + } + + return guid; +} + +void web_client_api_v1_management_init(void) { + api_secret = get_mgmt_api_key(); } inline uint32_t web_client_api_request_v1_data_options(char *o) { @@ -136,7 +198,7 @@ inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, int all = 0; while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if (!value || !*value) continue; if(!strcmp(value, "all")) all = 1; @@ -153,7 +215,7 @@ inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client uint32_t after = 0; while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if (!value || !*value) continue; char *name = mystrsep(&value, "="); @@ -176,7 +238,7 @@ inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client buffer_flush(w->response.data); while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if(!value || !*value) continue; char *name = mystrsep(&value, "="); @@ -271,7 +333,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c uint32_t options = 0x00000000; while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if(!value || !*value) continue; char *name = mystrsep(&value, "="); @@ -489,7 +551,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * */ while(url) { - char *value = mystrsep(&url, "?&"); + char *value = mystrsep(&url, "&"); if (!value || !*value) continue; char *name = mystrsep(&value, "="); @@ -615,12 +677,74 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * } } +static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb) { + int alarm_normal = 0, alarm_warn = 0, alarm_crit = 0; + RRDCALC *rc; + rrdhost_rdlock(host); + for(rc = host->alarms; rc ; rc = rc->next) { + if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) + continue; + + switch(rc->status) { + case RRDCALC_STATUS_WARNING: + alarm_warn++; + break; + case RRDCALC_STATUS_CRITICAL: + alarm_crit++; + break; + default: + alarm_normal++; + } + } + rrdhost_unlock(host); + buffer_sprintf(wb, "\t\t\"normal\": %d,\n", alarm_normal); + buffer_sprintf(wb, "\t\t\"warning\": %d,\n", alarm_warn); + buffer_sprintf(wb, "\t\t\"critical\": %d\n", alarm_crit); +} + +static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) { + RRDHOST *rc; + int count = 0; + rrd_rdlock(); + rrdhost_foreach_read(rc) { + if(count > 0) buffer_strcat(wb, ",\n"); + buffer_sprintf(wb, "\t\t\"%s\"", rc->hostname); + count++; + } + buffer_strcat(wb, "\n"); + rrd_unlock(); +} + +inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) { + (void)url; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->contenttype = CT_APPLICATION_JSON; + + buffer_strcat(wb, "{\n"); + buffer_sprintf(wb, "\t\"version\": \"%s\",\n", host->program_version); + buffer_sprintf(wb, "\t\"uid\": \"%s\",\n", host->machine_guid); + + buffer_strcat(wb, "\t\"mirrored_hosts\": [\n"); + web_client_api_request_v1_info_mirrored_hosts(wb); + buffer_strcat(wb, "\t],\n"); + + buffer_strcat(wb, "\t\"alarms\": {\n"); + web_client_api_request_v1_info_summary_alarm_statuses(host, wb); + buffer_strcat(wb, "\t}\n"); + + buffer_strcat(wb, "}"); + return 200; +} + static struct api_command { const char *command; uint32_t hash; WEB_CLIENT_ACL acl; int (*callback)(RRDHOST 
*host, struct web_client *w, char *url); } api_commands[] = { + { "info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_info }, { "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data }, { "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart }, { "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts }, @@ -635,7 +759,7 @@ static struct api_command { { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log }, { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables }, { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics }, - + { "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health }, // terminator { NULL, 0, WEB_CLIENT_ACL_NONE, NULL }, }; @@ -652,14 +776,14 @@ inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char * } // get the command - char *tok = mystrsep(&url, "/?&"); + char *tok = mystrsep(&url, "?"); if(tok && *tok) { debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, tok); uint32_t hash = simple_hash(tok); for(i = 0; api_commands[i].command ;i++) { if(unlikely(hash == api_commands[i].hash && !strcmp(tok, api_commands[i].command))) { - if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl)) + if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl)) return web_client_permission_denied(w); return api_commands[i].callback(host, w, url); diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h index b6f315dca..70b781780 100644 --- a/web/api/web_api_v1.h +++ b/web/api/web_api_v1.h @@ -6,6 +6,7 @@ #include "daemon/common.h" #include "web/api/badges/web_buffer_svg.h" #include "web/api/formatters/rrd2json.h" +#include "web/api/health/health_cmdapi.h" extern uint32_t web_client_api_request_v1_data_options(char *o); extern uint32_t web_client_api_request_v1_data_format(char *name); @@ -19,8 +20,12 @@ extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url); extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url); extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url); +extern int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url); extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url); extern void web_client_api_v1_init(void); +extern void web_client_api_v1_management_init(void); + +char *api_secret; #endif //NETDATA_WEB_API_V1_H diff --git a/web/gui/.well-known/dnt/cookies b/web/gui/.well-known/dnt/cookies new file mode 100644 index 000000000..b7c70e58d --- /dev/null +++ b/web/gui/.well-known/dnt/cookies @@ -0,0 +1,14 @@ +{ + "tracking": "T", + "compliance": ["https://github.com/netdata/netdata/wiki/cookies#compliance"], + "qualifiers": "afc", + "controller": ["https://github.com/netdata/netdata/wiki/cookies#controller"], + "same-party": [ + "my-netdata.io", + "mynetdata.io", + "netdata.online", + "netdata.rocks", + "registry.my-netdata.io" + ], + "policy": "https://github.com/netdata/netdata/wiki/cookies#policy" +} diff --git a/web/gui/Makefile.am b/web/gui/Makefile.am index 05d6f6542..ae8b49f40 100644 --- a/web/gui/Makefile.am +++ b/web/gui/Makefile.am @@ -77,7 +77,6 @@ dist_weblib_DATA = \ lib/bootstrap-table-export-1.11.0.min.js \ 
lib/bootstrap-toggle-2.2.2.min.js \ lib/clipboard-polyfill-be05dad.js \ - lib/c3-0.4.18.min.js \ lib/d3-4.12.2.min.js \ lib/d3pie-0.2.1-netdata-3.js \ lib/dygraph-c91c859.min.js \ @@ -89,10 +88,8 @@ dist_weblib_DATA = \ lib/jquery.peity-3.2.0.min.js \ lib/jquery.sparkline-2.1.2.min.js \ lib/lz-string-1.4.4.min.js \ - lib/morris-0.5.1.min.js \ lib/pako-1.0.6.min.js \ lib/perfect-scrollbar-0.6.15.min.js \ - lib/raphael-2.2.4-min.js \ lib/tableExport-1.6.0.min.js \ $(NULL) @@ -118,6 +115,7 @@ dist_webfonts_DATA = \ webimagesdir=$(webdir)/images dist_webimages_DATA = \ + images/netdata-logomark.svg \ images/alert-128-orange.png \ images/alert-128-red.png \ images/alert-multi-size-orange.ico \ diff --git a/web/gui/Makefile.in b/web/gui/Makefile.in deleted file mode 100644 index 76c157685..000000000 --- a/web/gui/Makefile.in +++ /dev/null @@ -1,838 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/gui -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) $(dist_web_DATA) $(dist_webcss_DATA) \ - $(dist_webdnt_DATA) 
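

Note on the `web/api/web_api_v1.c` hunks further above: they introduce `get_mgmt_api_key()`, which caches the management API key in a static buffer, reads it from `netdata.api.key` under the configured var-lib directory (validating it with `regenerate_guid()`), and, when the file is missing or invalid, generates a time-based UUID and persists it. The same file also tightens URL tokenization: the command is now split from the query string on `?` alone and parameters on `&`, instead of treating `?`, `&`, and `/` interchangeably. Note also that the `open()` creating the key file passes its mode as the decimal literal `444`; a read-only `0444` (octal) appears to be the intent. A standalone sketch of the same read-or-generate pattern follows; the path, the `0600` mode, the validation shortcut, and the error handling are illustrative placeholders rather than netdata's configuration lookup (link with `-luuid`):

```c
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <uuid/uuid.h>

#define GUID_LEN 36                      /* "xxxxxxxx-xxxx-..." without NUL  */

static char guid[GUID_LEN + 1] = "";     /* cached key, filled on first use  */

static const char *mgmt_api_key(const char *filename) {
    if(guid[0]) return guid;             /* already loaded or generated      */

    int fd = open(filename, O_RDONLY);   /* 1. try the key saved on disk     */
    if(fd != -1) {
        if(read(fd, guid, GUID_LEN) == GUID_LEN) guid[GUID_LEN] = '\0';
        else guid[0] = '\0';             /* short read: treat as invalid     */
        close(fd);
    }

    if(!guid[0]) {                       /* 2. none found: generate and save */
        uuid_t uuid;
        uuid_generate_time(uuid);
        uuid_unparse_lower(uuid, guid);

        fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0600);
        if(fd == -1 || write(fd, guid, GUID_LEN) != GUID_LEN) {
            fprintf(stderr, "cannot persist the API key to '%s'\n", filename);
            exit(1);
        }
        close(fd);
    }
    return guid;
}

int main(void) {
    printf("management API key: %s\n", mgmt_api_key("/tmp/netdata.api.key"));
    return 0;
}
```

`web_client_api_v1_management_init()` then stores the key in `api_secret`, and the new `manage/health` command is gated on the `WEB_CLIENT_ACL_MGMT` ACL rather than the dashboard ACL.
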
$(dist_webfonts_DATA) \ - $(dist_webimages_DATA) $(dist_weblib_DATA) \ - $(dist_webwellknown_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; -am__install_max = 40 -am__nobase_strip_setup = \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` -am__nobase_strip = \ - for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" -am__nobase_list = $(am__nobase_strip_setup); \ - for p in $$list; do echo "$$p $$p"; done | \ - sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ - $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ - if (++n[$$2] == $(am__install_max)) \ - { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ - END { for (dir in files) print dir, files[dir] }' -am__base_list = \ - sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ - sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' -am__uninstall_files_from_dir = { \ - test -z "$$files" \ - || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ - || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ - $(am__cd) "$$dir" && rm -f $$files; }; \ - } -am__installdirs = "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" \ - "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" \ - "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" \ - "$(DESTDIR)$(webwellknowndir)" -DATA = $(dist_noinst_DATA) $(dist_web_DATA) $(dist_webcss_DATA) \ - $(dist_webdnt_DATA) $(dist_webfonts_DATA) \ - $(dist_webimages_DATA) $(dist_weblib_DATA) \ - $(dist_webwellknown_DATA) -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = 
@build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ - -# -# Copyright (C) 2015 Alon Bar-Lev -# SPDX-License-Identifier: GPL-3.0-or-later -# -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - dashboard.js \ - version.txt \ - $(NULL) - -DASHBOARD_JS_FILES = \ - src/dashboard.js/prologue.js.inc \ - src/dashboard.js/utils.js \ - src/dashboard.js/server-detection.js \ - src/dashboard.js/dependencies.js \ - src/dashboard.js/error-handling.js \ - src/dashboard.js/compatibility.js \ - src/dashboard.js/xss.js \ - src/dashboard.js/colors.js \ - src/dashboard.js/units-conversion.js \ - src/dashboard.js/options.js \ - src/dashboard.js/localstorage.js \ - src/dashboard.js/timeout.js \ - src/dashboard.js/themes.js \ - src/dashboard.js/charting/dygraph.js \ - src/dashboard.js/charting/sparkline.js \ - src/dashboard.js/charting/google-charts.js \ - src/dashboard.js/charting/gauge.js \ - src/dashboard.js/charting/easy-pie-chart.js \ - src/dashboard.js/charting/d3pie.js \ - src/dashboard.js/charting/d3.js \ - src/dashboard.js/charting/peity.js \ - src/dashboard.js/charting.js \ - src/dashboard.js/chart-registry.js \ - src/dashboard.js/common.js \ - src/dashboard.js/main.js \ - src/dashboard.js/alarms.js \ - src/dashboard.js/registry.js \ - src/dashboard.js/boot.js \ - src/dashboard.js/epilogue.js.inc \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(DASHBOARD_JS_FILES) \ - $(NULL) - -dist_web_DATA = \ - demo.html \ - demo2.html \ - demosites.html \ - demosites2.html \ - dashboard.html \ - dashboard.js \ - dashboard_info.js \ - dashboard_info_custom_example.js \ - dashboard.css \ - dashboard.slate.css \ - favicon.ico \ - goto-host-from-alarm.html \ - index.html \ - main.css \ - main.js \ - infographic.html \ - robots.txt \ - refresh-badges.js \ - sitemap.xml \ - tv.html \ - version.txt \ - $(NULL) - -weblibdir = $(webdir)/lib -dist_weblib_DATA = \ - lib/bootstrap-3.3.7.min.js \ - lib/bootstrap-slider-10.0.0.min.js \ - lib/bootstrap-table-1.11.0.min.js \ - lib/bootstrap-table-export-1.11.0.min.js \ - lib/bootstrap-toggle-2.2.2.min.js \ - lib/clipboard-polyfill-be05dad.js \ - lib/c3-0.4.18.min.js \ - lib/d3-4.12.2.min.js \ - lib/d3pie-0.2.1-netdata-3.js \ - lib/dygraph-c91c859.min.js \ - lib/dygraph-smooth-plotter-c91c859.js \ - lib/fontawesome-all-5.0.1.min.js \ - lib/gauge-1.3.2.min.js \ - 
lib/jquery-2.2.4.min.js \ - lib/jquery.easypiechart-97b5824.min.js \ - lib/jquery.peity-3.2.0.min.js \ - lib/jquery.sparkline-2.1.2.min.js \ - lib/lz-string-1.4.4.min.js \ - lib/morris-0.5.1.min.js \ - lib/pako-1.0.6.min.js \ - lib/perfect-scrollbar-0.6.15.min.js \ - lib/raphael-2.2.4-min.js \ - lib/tableExport-1.6.0.min.js \ - $(NULL) - -webcssdir = $(webdir)/css -dist_webcss_DATA = \ - css/morris-0.5.1.css \ - css/bootstrap-3.3.7.css \ - css/bootstrap-theme-3.3.7.min.css \ - css/bootstrap-slate-flat-3.3.7.css \ - css/bootstrap-slider-10.0.0.min.css \ - css/bootstrap-toggle-2.2.2.min.css \ - css/c3-0.4.18.min.css \ - $(NULL) - -webfontsdir = $(webdir)/fonts -dist_webfonts_DATA = \ - fonts/glyphicons-halflings-regular.eot \ - fonts/glyphicons-halflings-regular.svg \ - fonts/glyphicons-halflings-regular.ttf \ - fonts/glyphicons-halflings-regular.woff \ - fonts/glyphicons-halflings-regular.woff2 \ - $(NULL) - -webimagesdir = $(webdir)/images -dist_webimages_DATA = \ - images/alert-128-orange.png \ - images/alert-128-red.png \ - images/alert-multi-size-orange.ico \ - images/alert-multi-size-red.ico \ - images/animated.gif \ - images/check-mark-2-128-green.png \ - images/check-mark-2-multi-size-green.ico \ - images/netdata.svg \ - images/post.png \ - images/android-icon-36x36.png \ - images/android-icon-48x48.png \ - images/android-icon-72x72.png \ - images/android-icon-96x96.png \ - images/android-icon-144x144.png \ - images/android-icon-192x192.png \ - images/apple-icon-57x57.png \ - images/apple-icon-60x60.png \ - images/apple-icon-72x72.png \ - images/apple-icon-76x76.png \ - images/apple-icon-114x114.png \ - images/apple-icon-120x120.png \ - images/apple-icon-144x144.png \ - images/apple-icon-152x152.png \ - images/apple-icon-180x180.png \ - images/apple-icon-precomposed.png \ - images/apple-icon.png \ - images/favicon-16x16.png \ - images/favicon-32x32.png \ - images/favicon-96x96.png \ - images/favicon.ico \ - images/ms-icon-70x70.png \ - images/ms-icon-144x144.png \ - images/ms-icon-150x150.png \ - images/ms-icon-310x310.png \ - images/banner-icon-144x144.png \ - $(NULL) - -webwellknowndir = $(webdir)/.well-known -dist_webwellknown_DATA = \ - $(NULL) - -webdntdir = $(webdir)/.well-known/dnt -dist_webdnt_DATA = \ - .well-known/dnt/cookies \ - $(NULL) - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/gui/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/gui/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): -install-dist_webDATA: $(dist_web_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webdir)" || exit $$?; \ - done - -uninstall-dist_webDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_web_DATA)'; test -n "$(webdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webdir)'; $(am__uninstall_files_from_dir) -install-dist_webcssDATA: $(dist_webcss_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webcssdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webcssdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webcssdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webcssdir)" || exit $$?; \ - done - -uninstall-dist_webcssDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webcss_DATA)'; test -n "$(webcssdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webcssdir)'; $(am__uninstall_files_from_dir) -install-dist_webdntDATA: $(dist_webdnt_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webdntdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webdntdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webdntdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webdntdir)" || exit $$?; \ - done - -uninstall-dist_webdntDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webdnt_DATA)'; test -n "$(webdntdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webdntdir)'; $(am__uninstall_files_from_dir) -install-dist_webfontsDATA: $(dist_webfonts_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webfontsdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webfontsdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - 
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webfontsdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webfontsdir)" || exit $$?; \ - done - -uninstall-dist_webfontsDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webfonts_DATA)'; test -n "$(webfontsdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webfontsdir)'; $(am__uninstall_files_from_dir) -install-dist_webimagesDATA: $(dist_webimages_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webimagesdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webimagesdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webimagesdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webimagesdir)" || exit $$?; \ - done - -uninstall-dist_webimagesDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webimages_DATA)'; test -n "$(webimagesdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webimagesdir)'; $(am__uninstall_files_from_dir) -install-dist_weblibDATA: $(dist_weblib_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(weblibdir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(weblibdir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(weblibdir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(weblibdir)" || exit $$?; \ - done - -uninstall-dist_weblibDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_weblib_DATA)'; test -n "$(weblibdir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(weblibdir)'; $(am__uninstall_files_from_dir) -install-dist_webwellknownDATA: $(dist_webwellknown_DATA) - @$(NORMAL_INSTALL) - @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ - if test -n "$$list"; then \ - echo " $(MKDIR_P) '$(DESTDIR)$(webwellknowndir)'"; \ - $(MKDIR_P) "$(DESTDIR)$(webwellknowndir)" || exit 1; \ - fi; \ - for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - echo "$$d$$p"; \ - done | $(am__base_list) | \ - while read files; do \ - echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(webwellknowndir)'"; \ - $(INSTALL_DATA) $$files "$(DESTDIR)$(webwellknowndir)" || exit $$?; \ - done - -uninstall-dist_webwellknownDATA: - @$(NORMAL_UNINSTALL) - @list='$(dist_webwellknown_DATA)'; test -n "$(webwellknowndir)" || list=; \ - files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ - dir='$(DESTDIR)$(webwellknowndir)'; $(am__uninstall_files_from_dir) -tags TAGS: - -ctags CTAGS: - -cscope cscopelist: - - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else 
d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(webdir)" "$(DESTDIR)$(webcssdir)" "$(DESTDIR)$(webdntdir)" "$(DESTDIR)$(webfontsdir)" "$(DESTDIR)$(webimagesdir)" "$(DESTDIR)$(weblibdir)" "$(DESTDIR)$(webwellknowndir)"; do \ - test -z "$$dir" || $(MKDIR_P) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-am - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: install-dist_webDATA install-dist_webcssDATA \ - install-dist_webdntDATA install-dist_webfontsDATA \ - install-dist_webimagesDATA install-dist_weblibDATA \ - install-dist_webwellknownDATA - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-dist_webDATA uninstall-dist_webcssDATA \ - uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \ - uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \ - uninstall-dist_webwellknownDATA - -.MAKE: install-am install-strip - -.PHONY: all all-am check check-am clean clean-generic cscopelist-am \ - ctags-am distclean distclean-generic distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dist_webDATA install-dist_webcssDATA \ - install-dist_webdntDATA install-dist_webfontsDATA \ - install-dist_webimagesDATA install-dist_weblibDATA \ - install-dist_webwellknownDATA install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags-am uninstall uninstall-am \ - uninstall-dist_webDATA uninstall-dist_webcssDATA \ - uninstall-dist_webdntDATA uninstall-dist_webfontsDATA \ - uninstall-dist_webimagesDATA uninstall-dist_weblibDATA \ - uninstall-dist_webwellknownDATA - - -dashboard.js: $(DASHBOARD_JS_FILES) - if test -f $@; then rm -f $@; fi - cat $(DASHBOARD_JS_FILES) > $@.tmp && mv $@.tmp $@ - -version.txt: - if test -d "$(top_srcdir)/.git"; then \ - git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ - fi > $@.tmp - test -s $@.tmp || echo 0 > $@.tmp - mv $@.tmp $@ - -# regenerate these files, even if they are up to date -.PHONY: version.txt dashboard.js - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/web/gui/README.md b/web/gui/README.md index 4cb050256..9be9ffc9c 100644 --- a/web/gui/README.md +++ b/web/gui/README.md @@ -1,4 +1,4 @@ -# Netdata Agent Web GUI +# Netdata agent web GUI ## Generating dashboard.js @@ -104,3 +104,5 @@ This is a chart that displays the hotwater temperature in the given range of 40 > ``` ![hot water chart](https://user-images.githubusercontent.com/12159026/28666665-a7d68ad2-72c8-11e7-9a96-f6bf9691b471.png) + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/gui/browserconfig.xml b/web/gui/browserconfig.xml new file mode 100644 index 000000000..32f47595e --- /dev/null +++ b/web/gui/browserconfig.xml @@ -0,0 +1,2 @@ + +#ffffff diff --git a/web/gui/confluence/README.md b/web/gui/confluence/README.md index 3973c10be..3d7eda6a1 100644 --- a/web/gui/confluence/README.md +++ b/web/gui/confluence/README.md @@ -1,4 +1,4 @@ -# Atlassian Confluence Dashboards +# Atlassian Confluence dashboards With netdata you can build **live, interactive, monitoring dashboards** directly on Atlassian's **Confluence** pages. @@ -1010,3 +1010,5 @@ NETDATA.options.current.eliminate_zero_dimensions = false; ``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fconfluence%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/gui/custom/README.md b/web/gui/custom/README.md index 7e1877a4d..5ee723547 100644 --- a/web/gui/custom/README.md +++ b/web/gui/custom/README.md @@ -1,4 +1,4 @@ -# Custom Dashboards +# Custom dashboards You can: @@ -341,7 +341,7 @@ On charts that by default have a legend managed by `dashboard.js` you can remove ### API options -You can append netdata **[[REST API v1]]** data options, using this: +You can append netdata **[REST API v1](../../api)** data options, using this: ```html
``` + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fgui%2Fcustom%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/gui/dashboard.html b/web/gui/dashboard.html index e0afefda0..d32224802 100644 --- a/web/gui/dashboard.html +++ b/web/gui/dashboard.html @@ -696,4 +696,4 @@ So, to avoid flashing the charts, we destroy and re-create the charts on each up - + diff --git a/web/gui/dashboard.js b/web/gui/dashboard.js index 8a8061a55..8fea62541 100644 --- a/web/gui/dashboard.js +++ b/web/gui/dashboard.js @@ -77,7 +77,8 @@ // ---------------------------------------------------------------------------- // global namespace -const NETDATA = window.NETDATA || {}; +// Should stay var! +var NETDATA = window.NETDATA || {}; (function(window, document, $, undefined) { @@ -158,7 +159,7 @@ NETDATA.seconds4human = function (seconds, options) { if (typeof options !== 'object') { options = defaultOptions; } else { - for (const x in defaultOptions) { + for (var x in defaultOptions) { if (typeof options[x] !== 'string') { options[x] = defaultOptions[x]; } @@ -690,7 +691,7 @@ NETDATA.xss = { } else { // console.log('checking object "' + name + '"'); - for (const i in obj) { + for (var i in obj) { if (obj.hasOwnProperty(i) === false) { continue; } @@ -805,6 +806,21 @@ NETDATA.unitsConversion = { 'GB/s': 1024 * 1024, 'TB/s': 1024 * 1024 * 1024 }, + 'KiB/s': { + 'B/s': 1 / 1024, + 'KiB/s': 1, + 'MiB/s': 1024, + 'GiB/s': 1024 * 1024, + 'TiB/s': 1024 * 1024 * 1024 + }, + 'B': { + 'B': 1, + 'KiB': 1024, + 'MiB': 1024 * 1024, + 'GiB': 1024 * 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 * 1024, + 'PiB': 1024 * 1024 * 1024 * 1024 * 1024 + }, 'KB': { 'B': 1 / 1024, 'KB': 1, @@ -812,6 +828,13 @@ NETDATA.unitsConversion = { 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024 }, + 'KiB': { + 'B': 1 / 1024, + 'KiB': 1, + 'MiB': 1024, + 'GiB': 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 + }, 'MB': { 'B': 1 / (1024 * 1024), 'KB': 1 / 1024, @@ -820,6 +843,14 @@ NETDATA.unitsConversion = { 'TB': 1024 * 1024, 'PB': 1024 * 1024 * 1024 }, + 'MiB': { + 'B': 1 / (1024 * 1024), + 'KiB': 1 / 1024, + 'MiB': 1, + 'GiB': 1024, + 'TiB': 1024 * 1024, + 'PiB': 1024 * 1024 * 1024 + }, 'GB': { 'B': 1 / (1024 * 1024 * 1024), 'KB': 1 / (1024 * 1024), @@ -828,6 +859,15 @@ NETDATA.unitsConversion = { 'TB': 1024, 'PB': 1024 * 1024, 'EB': 1024 * 1024 * 1024 + }, + 'GiB': { + 'B': 1 / (1024 * 1024 * 1024), + 'KiB': 1 / (1024 * 1024), + 'MiB': 1 / 1024, + 'GiB': 1, + 'TiB': 1024, + 'PiB': 1024 * 1024, + 'EiB': 1024 * 1024 * 1024 } /* 'milliseconds': { @@ -1027,7 +1067,7 @@ NETDATA.unitsConversion = { // } // } const sunit = this.scalableUnits[units]; - for (const x of Object.keys(sunit)) { + for (var x of Object.keys(sunit)) { let m = sunit[x]; if (m <= max && m > tdivider) { tunits = x; @@ -1063,7 +1103,7 @@ NETDATA.unitsConversion = { // find the max divider of all charts let common_units = t[uuid]; - for (const x in t) { + for (var x in t) { if (t.hasOwnProperty(x) && t[x].divider > common_units.divider) { common_units = t[x]; } @@ -1130,7 +1170,7 @@ NETDATA.unitsConversion = { } else if (typeof this.convertibleUnits[units] !== 'undefined') { // units that can be converted if (desired_units === 'auto') { - for (const x in this.convertibleUnits[units]) { + for (var x in this.convertibleUnits[units]) { if (this.convertibleUnits[units].hasOwnProperty(x)) { if 
(this.convertibleUnits[units][x].check(max)) { //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString()); @@ -1186,7 +1226,7 @@ if (typeof netdataIcons === 'object') { // if (NETDATA.icons.hasOwnProperty(icon) && typeof(netdataIcons[icon]) === 'string') // NETDATA.icons[icon] = netdataIcons[icon]; // } - for (const icon of Object.keys(NETDATA.icons)) { + for (var icon of Object.keys(NETDATA.icons)) { if (typeof(netdataIcons[icon]) === 'string') { NETDATA.icons[icon] = netdataIcons[icon] } @@ -1206,7 +1246,7 @@ if (typeof netdataShowAlarms === 'undefined') { } if (typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) { - netdataRegistryAfterMs = 1500; + netdataRegistryAfterMs = 0; // 1500; } if (typeof netdataRegistry === 'undefined') { @@ -1664,6 +1704,8 @@ NETDATA.timeout = { }; NETDATA.timeout.init(); +// Codacy declarations +/* global netdataTheme */ NETDATA.themes = { white: { @@ -1756,6 +1798,10 @@ NETDATA.colors = NETDATA.themes.current.colors; //NETDATA.colors = [ '#5DA5DA', '#F15854', '#FAA43A', '#60BD68', '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#4D4D4D' ]; // dygraph +// Codacy declarations +/* global smoothPlotter */ +/* global Dygraph */ + NETDATA.dygraph = { smooth: false }; @@ -4746,7 +4792,7 @@ NETDATA.commonMin = { // for (let i in t) { // if (t.hasOwnProperty(i) && t[i] < m) m = t[i]; // } - for (const ti of Object.values(t)) { + for (var ti of Object.values(t)) { if (ti < m) { m = ti; } @@ -4810,7 +4856,7 @@ NETDATA.commonMax = { // for (let i in t) { // if (t.hasOwnProperty(i) && t[i] > m) m = t[i]; // } - for (const ti of Object.values(t)) { + for (var ti of Object.values(t)) { if (ti > m) { m = ti; } @@ -4940,6 +4986,9 @@ NETDATA.commonColors = { // *** src/dashboard.js/main.js +// Codacy declarations +/* global clipboard */ + if (NETDATA.options.debug.main_loop) { console.log('welcome to NETDATA'); } @@ -9637,6 +9686,8 @@ NETDATA.alarms = { NETDATA.registry = { server: null, // the netdata registry server + isCloudEnabled: false,// is netdata.cloud functionality enabled? 
+ cloudBaseURL: null, // the netdata cloud base url person_guid: null, // the unique ID of this browser / user machine_guid: null, // the unique ID the netdata server that served dashboard.js hostname: 'unknown', // the hostname of the netdata server that served dashboard.js @@ -9644,8 +9695,17 @@ NETDATA.registry = { machines_array: null, // the user's other URLs in an array person_urls: null, + MASKED_DATA: "***", + + isUsingGlobalRegistry: function() { + return NETDATA.registry.server == "https://registry.my-netdata.io"; + }, + + isRegistryEnabled: function() { + return !(NETDATA.registry.isUsingGlobalRegistry() || isSignedIn()) + }, + parsePersonUrls: function (person_urls) { - // console.log(person_urls); NETDATA.registry.person_urls = person_urls; if (person_urls) { @@ -9698,13 +9758,21 @@ NETDATA.registry = { NETDATA.registry.hello(NETDATA.serverDefault, function (data) { if (data) { NETDATA.registry.server = data.registry; + if (data.cloud_base_url != "") { + NETDATA.registry.isCloudEnabled = true; + NETDATA.registry.cloudBaseURL = data.cloud_base_url; + } else { + NETDATA.registry.isCloudEnabled = false; + NETDATA.registry.cloudBaseURL = ""; + } NETDATA.registry.machine_guid = data.machine_guid; NETDATA.registry.hostname = data.hostname; - + if (dataLayer) { + if (data.anonymous_statistics) dataLayer.push({"anonymous_statistics" : "true", "machine_guid" : data.machine_guid}); + } NETDATA.registry.access(2, function (person_urls) { NETDATA.registry.parsePersonUrls(person_urls); - - }); + }); } }); }, @@ -9747,13 +9815,25 @@ NETDATA.registry = { }, access: function (max_redirects, callback) { + let name = NETDATA.registry.MASKED_DATA; + let url = NETDATA.registry.MASKED_DATA; + + if (!NETDATA.registry.isUsingGlobalRegistry()) { + // If the user is using a private registry keep sending identifiable + // data. + name = NETDATA.registry.hostname; + url = NETDATA.serverDefault; + } + + console.log("ACCESS", name, url); + // send ACCESS to a netdata registry: // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL) // 2. 
it responds with a list of netdata servers we know // the registry identifies us using a cookie it sets the first time we access it // the registry may respond with a redirect URL to send us to another registry $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault), // + '&visible_url=' + encodeURIComponent(document.location), + url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(name) + '&url=' + encodeURIComponent(url), // + '&visible_url=' + encodeURIComponent(document.location), async: true, cache: false, headers: { @@ -9785,14 +9865,14 @@ return callback(null); } } - } - else { + } else { if (typeof data.person_guid === 'string') { NETDATA.registry.person_guid = data.person_guid; } if (typeof callback === 'function') { - return callback(data.urls); + const urls = data.urls.filter((u) => u[1] !== NETDATA.registry.MASKED_DATA); + return callback(urls); } } }) diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js index 2f542d436..00cac6317 100644 --- a/web/gui/dashboard_info.js +++ b/web/gui/dashboard_info.js @@ -1,5 +1,8 @@ // SPDX-License-Identifier: GPL-3.0-or-later +// Codacy declarations +/* global NETDATA */ + var netdataDashboard = window.netdataDashboard || {}; // Informational content for the various sections of the GUI (menus, sections, charts, etc.) @@ -436,10 +439,10 @@ netdataDashboard.menu = { info: undefined }, - 'linux_power_supply': { + 'powersupply': { title: 'Power Supply', icon: '', - info: 'Statistics for the various system power supplies.' + info: 'Statistics for the various system power supplies. Data is collected from the Linux power supply class.' } }; @@ -647,7 +650,7 @@ netdataDashboard.context = { }, 'system.swapio': { - info: 'Total Swap I/O. (netdata measures both in and out. If either of them is not shown in the chart, it is because it is zero - you can change the page settings to always render all the available dimensions on all charts).' + info: 'Total Swap I/O. (netdata measures both in and out. If either the in or the out metric is not shown in the chart, it is because that metric is zero - you can change the page settings to always render all the available dimensions on all charts).'
}, 'system.pgfaults': { @@ -2315,6 +2318,25 @@ netdataDashboard.context = { 'proxysql.commands_duration': { info: 'The total time spent executing commands of that type, in ms' + }, + + // ------------------------------------------------------------------------ + // Power Supplies + + 'powersupply.capacity': { + info: undefined + }, + + 'powersupply.charge': { + info: undefined + }, + + 'powersupply.energy': { + info: undefined + }, + + 'powersupply.voltage': { + info: undefined } // ------------------------------------------------------------------------ diff --git a/web/gui/dashboard_info_custom_example.js b/web/gui/dashboard_info_custom_example.js index 51ce0be22..6a2d53765 100644 --- a/web/gui/dashboard_info_custom_example.js +++ b/web/gui/dashboard_info_custom_example.js @@ -21,6 +21,9 @@ * */ +// Codacy declarations +/* global customDashboard */ + // ---------------------------------------------------------------------------- // MENU // diff --git a/web/gui/demo.html b/web/gui/demo.html new file mode 100644 index 000000000..68f374b65 --- /dev/null +++ b/web/gui/demo.html @@ -0,0 +1,51 @@ + + + + + NetData Dashboard + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + diff --git a/web/gui/demo2.html b/web/gui/demo2.html new file mode 100644 index 000000000..183a9550d --- /dev/null +++ b/web/gui/demo2.html @@ -0,0 +1,143 @@ + + + + + NetData Dashboard + + + + + + + + + + + + + + + + + + + + + + +
+
why netdata?
+
+
These charts visualize the same data...
+ + + + + + +
+
+ +
+
I can trace an issue like this
+
+
+
+
+
Can you trace an issue like these?
 
+
+
+
+
+
+ +
+
I can trace an issue like this
+
+
+
+
+
Can you trace an issue like these?
 
+
+
+
+
+
+
Hover on the chart below, to see the selected value on the charts above!
+
+
+ + diff --git a/web/gui/demosites.html b/web/gui/demosites.html new file mode 100644 index 000000000..33a771db4 --- /dev/null +++ b/web/gui/demosites.html @@ -0,0 +1,1501 @@ + + + + + + NetData: Get control of your Linux Servers. Simple. Effective. Awesome. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

Monitor your systems and applications, the right way!

+

+ Unparalleled insights, in real-time, + of everything happening on your systems and applications, + with stunning, interactive web dashboards + and powerful performance and health alarms. +

+ +
+
+
+

Enter the world of Netdata!

+
+

1s granularity

+
+
+
+

+ Per second data collection and visualization, for all metrics! +

+


 + Netdata zooms into problems by providing higher resolution information than any other monitoring solution. +
 

+
+
+

+ The world goes real-time. +
 
+ High resolution metrics are required to effectively monitor and troubleshoot systems and applications, especially in virtual environments. +
 
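To see this granularity from outside the dashboard, you can query the agent's data API directly. Below is a minimal sketch, not part of this patch, assuming a netdata agent at its default address (localhost:19999) and the standard system.cpu chart:

```js
// Fetch the last 10 seconds of system.cpu, one point per second,
// through the same /api/v1/data endpoint the dashboard itself uses.
fetch('http://localhost:19999/api/v1/data?chart=system.cpu&after=-10&points=10&format=json')
    .then((response) => response.json())
    .then((data) => {
        // data.result.labels: 'time' plus one label per dimension
        // data.result.data:   one row of values per second collected
        console.log(data.result.labels);
        console.log(data.result.data.length + ' rows of per-second values');
    });
```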

+
+
+
+ +
+ +
+
+
+

Unlimited metrics

+
+
+
+

+ Use all the metrics, from all available sources! +

+

+ Netdata collects all the metrics native console tools do. It has been designed to kill the console for troubleshooting infrastructure slowdowns and outages. +

+
+
+

+ All metrics are important and all should be available when you need them. +
 
+ Filtering out most metrics is like reading a book by skipping most of its pages. +
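One quick way to see how many metrics an agent exposes is its charts API, which lists everything it currently collects. A minimal sketch, not part of this patch, assuming the agent's default address:

```js
// List all charts a netdata agent currently collects via /api/v1/charts.
fetch('http://localhost:19999/api/v1/charts')
    .then((response) => response.json())
    .then((data) => {
        // data.charts maps chart ids (e.g. 'system.cpu') to their metadata
        console.log(Object.keys(data.charts).length + ' charts collected');
    });
```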

+
+
+
+ +
+
+
+
+

Meaningful presentation

+
+
+
+

+ Explore all metrics in a meaningful, easy to understand way! +

+


 + Netdata engineers and experts in our community organize metrics in a meaningful way, so that you can learn and understand them right on the job, while troubleshooting issues in your infrastructure. +
 

+
+
+

+ Metrics are a lot more than name-value pairs over time. +

+ It is just not practical to require all users to have a deep understanding of all metrics in order to monitor their systems and applications. +
 
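This organization is data-driven: dashboard_info.js, touched by this patch, registers a title, an icon and an explanation for each section of the dashboard. A minimal sketch of such an entry, mirroring the 'powersupply' menu this patch adds; the icon markup shown here is a hypothetical example:

```js
// Register dashboard metadata for a metrics section, the way dashboard_info.js
// does (assumes netdataDashboard.menu has already been initialized).
netdataDashboard.menu['powersupply'] = {
    title: 'Power Supply',
    icon: '<i class="fas fa-battery-half"></i>', // hypothetical icon markup
    info: 'Statistics for the various system power supplies. ' +
          'Data is collected from the Linux power supply class.'
};
```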

+
+
+
+ +
+ +
+
+
+

Immediate results

+
+
+
+

+ Install and use immediately! Get fully functional visualization and alarms, in just a couple of seconds after installation! +

+


 + Netdata decouples your skills from your monitoring infrastructure. + Whether you are an expert or a novice, Netdata will apply all the community's knowledge and expertise to your monitoring infrastructure. +
 

+
+
+

+ Most of our infrastructure is based on standardized systems and applications. +
 
+ It is a tremendous waste of time and effort, on a global scale, to require all users to configure their infrastructure dashboards and alarms metric by metric. +
 

+
+
+
+ +
+ +
+

How it works

+
+

+ Netdata is a monitoring agent you install on all your systems: +
+ physical servers, virtual servers, containers, IoT. +

+


 + Netdata is lightweight, designed to permanently run on all systems without disrupting their core function. + By default, it needs just 1% CPU of a single core, a few MB of RAM and no disk I/O at all. +
 

+

+ Each Netdata is (by default) autonomous, taking care of all the following. +
But all your Netdata are integrated into one large distributed application. +

+
+
+

Collect

+


 + Netdata automatically detects data collection sources on the host it runs on. + It comes with hundreds of plugins for collecting system and application metrics, + including databases, web servers, and commonly used application servers. +
 

+ Netdata is also a high performance, distributed statsd server, allowing custom + application metrics to be collected and visualized. +
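A minimal sketch of pushing a custom application metric to that statsd server, not part of this patch, assuming netdata's statsd listener on its default UDP port 8125 and a hypothetical 'myapp.requests' counter:

```js
// Send one statsd counter increment to a local netdata agent (Node.js).
const dgram = require('dgram');

const socket = dgram.createSocket('udp4');
const metric = Buffer.from('myapp.requests:1|c'); // statsd line format: name:value|type

socket.send(metric, 8125, 'localhost', (err) => {
    if (err) console.error(err);
    socket.close(); // UDP is fire-and-forget; no acknowledgement is expected
});
```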

+
+

Check (alarms)

+

+ Each Netdata spawns a thread that examines the metrics as they get collected, + evaluates pre-configured alarm expressions and triggers alarm notifications. +

+ Netdata comes with hundreds of alarms that detect common system and application issues, + which are automatically attached to the collected metrics, + supporting dozens of alarm notification integrations. +
 
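The currently raised alarms are exposed through the same API the dashboard's alarm panel uses. A minimal sketch, not part of this patch, assuming the agent's default address:

```js
// Ask a netdata agent for its active alarms via /api/v1/alarms.
fetch('http://localhost:19999/api/v1/alarms?active')
    .then((response) => response.json())
    .then((data) => {
        // data.alarms maps 'chart.alarm_name' keys to status, value, etc.
        for (const name of Object.keys(data.alarms)) {
            console.log(name + ': ' + data.alarms[name].status);
        }
    });
```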

+
+

Stream

+

+ Each Netdata can stream its metrics, in real-time, to any other Netdata. + Streaming allows Netdata to be used in ephemeral nodes and containers in auto-scaled environments, + but it also allows building Netdata hierarchies for aggregating the metrics of multiple Netdata nodes. +

+
+

Store

+

+ Each Netdata has its own internal metrics database. This database is optimized + for minimal memory footprint, and due to its lockless design allows one writer + and multiple readers per metric, concurrently, contributing significantly to + the performance of Netdata. +

+
+

Archive

+


 + Netdata can archive its metrics to time-series databases (prometheus, graphite, opentsdb, json document dbs, etc) + so that Netdata can be integrated into existing monitoring tool-chains. +
 

+
+

Visualize

+

+ The best part of Netdata is its visualization. Low latency, speedy and snazzy. +

+ Netdata dashboards are optimized for visual anomaly detection, a powerful tool to troubleshoot + performance issues. +
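Custom pages like the demo sites added by this patch embed these visualizations declaratively. A minimal sketch, assuming dashboard.js has already been loaded from an agent that exposes the usual system.cpu chart:

```js
// Add a chart placeholder; dashboard.js scans the page for [data-netdata]
// elements and renders each one with the selected charting library.
const chart = document.createElement('div');
chart.setAttribute('data-netdata', 'system.cpu');    // chart id on the agent
chart.setAttribute('data-chart-library', 'dygraph'); // the default library
chart.setAttribute('data-after', '-300');            // show the last 5 minutes
chart.setAttribute('data-height', '200px');
document.body.appendChild(chart);
```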

+
+
+
+
+ +
+   +
+ + +

netdata live demo sites

+
+
+ +
+
+
+
+
+ Enter London! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+ Enter Atlanta! +
+ Donated by CDN77.com +
+
+
+
+
+
+
+
+ Enter California! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+ Enter Canada! +
+ Donated by DigitalOcean.com +
+
+
+
 
+
+
+
+
+
+ Enter Germany! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+ Enter New York! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+ Enter Singapore! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+ Enter India! +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+ Israel +
+
+
+ requests/s +
+
+
+
+
+
+
+ Enter Israel! +
+ Donated by octopuscs.com +
+
+
+
+
+
+ EU - France +
+
+
+ requests/s +
+
+
+
+
+
+
+ Enter Roubaix! +
+ Donated by ventureer.com +
+
+
+
+
+
+ EU - Spain +
+
+
+ requests/s +
+
+
+
+
+
+
+ Enter Madrid! +
+ Donated by stackscale.com +
+
+
+
+
+
+
+
+
+ +
+ Charts are coming from all servers, in parallel. +
+ The servers are not aware of this multi-server dashboard. +
+ +
+
+
+ EU - London connected clients +
+
+
+
+
+
+ +
+ Each server is not aware of the other servers. +
+ But on this dashboard they are one! (hover on the chart above) +
+ + + +
+
+ +

Who uses netdata?

+
+
+

+ Netdata is used by hundreds of thousands of users all over the world. +
 
+ Check our GitHub watchers list. +
+ You will find people working for Amazon, Atos, Baidu, Cisco Systems, Citrix, + Deutsche Telekom, DigitalOcean, Elastic, EPAM Systems, Ericsson, Google, + Groupon, Hortonworks, HP, Huawei, IBM, Microsoft, NewRelic, + Nvidia, Red Hat, SAP, Selectel, TicketMaster, Vimeo, and many more! +

+ + The following figures come from users using the netdata public global registry.
Counting since May 16th 2016. Actual figures may be a lot higher.
+
+
+
+ netdata unique users +
+
+
+
+
+
+
+
+
+ netdata monitored servers +
+
+
+
+
+
+
+
+
+ netdata sessions served +
+
+
+
+
+
+

+ + +

+

+ + netdata can generate auto-refreshing badges, like these: + +
+ + + +
+ These badges auto-refresh every minute. +
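A minimal sketch of embedding such a badge on your own page, not part of this patch, assuming an agent at its default address and its /api/v1/badge.svg endpoint:

```js
// Embed a netdata badge and refresh it every minute, like this page does.
const badge = document.createElement('img');
const url = 'http://localhost:19999/api/v1/badge.svg?chart=system.cpu';
badge.src = url;
document.body.appendChild(badge);

setInterval(() => {
    badge.src = url + '&_=' + Date.now(); // cache-buster forces a fresh badge
}, 60000);
```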

+
+
+ netdata is featured in GitHub's State of the Octoverse 2016 +
+ + + +
+
+ +
+
+
+
+ + + + + + +
+
+ + + + + + + + + + +
+ + + + + + + diff --git a/web/gui/demosites2.html b/web/gui/demosites2.html new file mode 100644 index 000000000..41ad9d6c6 --- /dev/null +++ b/web/gui/demosites2.html @@ -0,0 +1,1112 @@ + + + + + NetData - Real-time performance monitoring, done right! + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ netdata +
+
+ real-time performance monitoring +
+
+ scaled out! +
+
+ pick a netdata demo server +
+
+ these demo servers show what you will get by installing netdata +
+ +
+
+ +
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by CDN77.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
 
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+
+
+
+
 
+ +
+ Donated by DigitalOcean.com +
+
+
+
+
+ +
+ this page is a custom netdata dashboard +
+
+ charts are coming from 8 servers, in parallel +
+ the servers are not aware of this multi-server dashboard, +
+ each server is not aware of the other servers, +
+ but on this dashboard they are one! +
+
+ + hover on a chart below, or drag it to show the past - the others will follow! +
+ double click on a chart to reset them all +
+ +
+ our nginx performance +
+
+ (we proxy netdata through nginx, on the demo sites) +
+ + + + + +
+
+
+
+ EU - London web requests/s +
+
+
+
+
+ +
+
+ US - Atlanta web requests/s +
+
+
+
+
+ +
+
+ US - California web requests/s +
+
+
+
+
+ +
+
+ Canada web requests/s +
+
+
+
+
+
+ +
+
+
+ EU - London active connections +
+
+
+
+
+ +
+
+ US - Atlanta active connections +
+
+
+
+
+ +
+
+ US - California active connections +
+
+
+
+
+ +
+
+ Canada active connections +
+
+
+
+
+
+
+ +
+ these charts are draggable and touchable, double click them to reset them +
+ + +
+ bandwidth consumption on the demo sites +
+
+ Linux QoS is configured by FireQOS +
+ + + + + +
+
+
+
+
+ +
+
+ +
+ +
+
+
+ +
+
+
+
+ +
+
+
+ +
+ +
+
+ +
+ +
+
+
+ +
+
+
+
+
+
+ these legends are interactive and the charts are resizable here ^^^ +
+ +
+ DDoS protection performance on the demo sites +
+
+ iptables SYNPROXY configured by FireHOL +
+ +
+ +
+
+ EU - London, TCP SYN packets/s received +
+
+
+
+
+ +
+
+ US - Atlanta, TCP SYN packets/s received +
+
+
+
+
+ +
+
+ US - California, TCP SYN packets/s received +
+
+
+
+
+ +
+
+ Canada, TCP SYN packets/s received +
+
+
+
+
+
+
+ did you notice the decimal numbers? +
netdata interpolates collected values at second boundaries, with nanosecond detail!
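A minimal sketch of the idea, with hypothetical numbers: collections rarely land exactly on the second, so the value stored for each second boundary is interpolated between the two surrounding collections.

```js
// Linear interpolation at a second boundary - the source of the decimals.
const prev = { t: 1.2, v: 100 }; // collected shortly after second 1
const next = { t: 2.1, v: 190 }; // collected shortly after second 2
const boundary = 2.0;            // the timestamp we want a value for

const fraction = (boundary - prev.t) / (next.t - prev.t);
const interpolated = prev.v + (next.v - prev.v) * fraction;
console.log(interpolated.toFixed(2)); // '180.00'
```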
+
+ + +
+ CPU Utilization of the demo sites +
+ +
+
+
+
+ +
+
+
+ +
+
+
+ +
+
+
+
+
+ what is using so much CPU? +
The site iplists.firehol.org is maintained by FireHOL - the CPU is used for comparing security IP Lists.
+
+ +
+ Netdata performance +
+
+ netdata monitors users, user groups, applications (process trees) +
+ containers (lxc, docker, etc.) and SNMP devices. +
+ + + + + +
+
+
+
+ EU - London, CPU % of a single core +
+
+
+
+
+ +
+
+ US - Atlanta, CPU % of a single core +
+
+
+
+
+ +
+
+ US - California, CPU % of a single core +
+
+
+
+
+ +
+
+ Toronto, CPU % of a single core +
+
+
+
+
+ +
+ this utilization is for the whole netdata process tree, and the percentage is of a single core! +
including BASH plugins (it monitors mysql on the demo sites), node.js plugins (it monitors bind9 on the demo sites), etc. +
and including the chart refreshes for the dashboards of all viewers.
+
+
+ +
+
+
+ EU - London, API average response time in milliseconds +
+
+
+
+
+ +
+
+ US - Atlanta, API average response time in milliseconds +
+
+
+
+
+ +
+
+ US - California, API average response time in milliseconds +
+
+
+
+
+ +
+
+ Canada, API average response time in milliseconds +
+
+
+
+
+ +
+ netdata is really fast (the values are milliseconds!) +
+ These values include everything, from the reception of the first byte to the dispatch of the last, including gzip compression. +
+ Values above 2-3ms are usually chart refreshes of charts with several dimensions, charts with very long durations (zoomed out), or file transfers. +
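A minimal sketch of measuring this yourself from a browser console, not part of this patch, assuming an agent at its default address:

```js
// Time one netdata API round-trip - the quantity charted above, in ms.
const start = performance.now();
fetch('http://localhost:19999/api/v1/data?chart=system.cpu&after=-60')
    .then((response) => response.json())
    .then(() => {
        const ms = performance.now() - start;
        console.log('API response time: ' + ms.toFixed(1) + ' ms');
    });
```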
+
+
+
+ +
+ want to know more? +
+ jump to the netdata page at github +
+ it needs just 3 mins to be installed on your servers! +
+   +
+
+ + + diff --git a/web/gui/fonts/glyphicons-halflings-regular.eot b/web/gui/fonts/glyphicons-halflings-regular.eot new file mode 100644 index 000000000..b93a4953f Binary files /dev/null and b/web/gui/fonts/glyphicons-halflings-regular.eot differ diff --git a/web/gui/fonts/glyphicons-halflings-regular.svg b/web/gui/fonts/glyphicons-halflings-regular.svg new file mode 100644 index 000000000..2a4aabacf --- /dev/null +++ b/web/gui/fonts/glyphicons-halflings-regular.svg @@ -0,0 +1,289 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/web/gui/fonts/glyphicons-halflings-regular.ttf b/web/gui/fonts/glyphicons-halflings-regular.ttf new file mode 100644 index 000000000..1413fc609 Binary files /dev/null and b/web/gui/fonts/glyphicons-halflings-regular.ttf differ diff --git a/web/gui/fonts/glyphicons-halflings-regular.woff b/web/gui/fonts/glyphicons-halflings-regular.woff new file mode 100644 index 000000000..9e612858f Binary files /dev/null and b/web/gui/fonts/glyphicons-halflings-regular.woff differ diff --git a/web/gui/fonts/glyphicons-halflings-regular.woff2 b/web/gui/fonts/glyphicons-halflings-regular.woff2 new file mode 100644 index 000000000..64539b54c Binary files /dev/null and b/web/gui/fonts/glyphicons-halflings-regular.woff2 differ diff --git a/web/gui/goto-host-from-alarm.html b/web/gui/goto-host-from-alarm.html index 5eb66b5d0..eb1d4839b 100644 --- a/web/gui/goto-host-from-alarm.html +++ b/web/gui/goto-host-from-alarm.html @@ -2,16 +2,21 @@ + Goto a host you know... - - netdata dashboard @@ -13,8 +20,8 @@ - - + + @@ -165,7 +218,7 @@ Copyright 2016-2018, Costa Tsaousis.
- Released under GPL v3 or later. + Released under GPL v3 or later. Netdata uses third party tools.

@@ -418,7 +471,7 @@
Enjoy real-time performance monitoring! @@ -1146,7 +1199,6 @@
+ + + + + - + diff --git a/web/gui/lib/c3-0.4.18.min.js b/web/gui/lib/c3-0.4.18.min.js deleted file mode 100644 index 9491b72a6..000000000 --- a/web/gui/lib/c3-0.4.18.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// SPDX-License-Identifier: MIT -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):t.c3=e()}(this,function(){"use strict";function t(t,e){var i=this;i.component=t,i.params=e||{},i.d3=t.d3,i.scale=i.d3.scale.linear(),i.range,i.orient="bottom",i.innerTickSize=6,i.outerTickSize=this.params.withOuterTick?6:0,i.tickPadding=3,i.tickValues=null,i.tickFormat,i.tickArguments,i.tickOffset=0,i.tickCulling=!0,i.tickCentered,i.tickTextCharSize,i.tickTextRotate=i.params.tickTextRotate,i.tickLength,i.axis=i.generateAxis()}function e(t){var e=this.internal=new i(this);e.loadConfig(t),e.beforeInit(t),e.init(),e.afterInit(t),function t(e,i,n){Object.keys(e).forEach(function(a){i[a]=e[a].bind(n),Object.keys(e[a]).length>0&&t(e[a],i[a],n)})}(P,this,this)}function i(t){var e=this;e.d3=window.d3?window.d3:"undefined"!=typeof require?require("d3"):void 0,e.api=t,e.config=e.getDefaultConfig(),e.data={},e.cache={},e.axes={}}var n,a,r={target:"c3-target",chart:"c3-chart",chartLine:"c3-chart-line",chartLines:"c3-chart-lines",chartBar:"c3-chart-bar",chartBars:"c3-chart-bars",chartText:"c3-chart-text",chartTexts:"c3-chart-texts",chartArc:"c3-chart-arc",chartArcs:"c3-chart-arcs",chartArcsTitle:"c3-chart-arcs-title",chartArcsBackground:"c3-chart-arcs-background",chartArcsGaugeUnit:"c3-chart-arcs-gauge-unit",chartArcsGaugeMax:"c3-chart-arcs-gauge-max",chartArcsGaugeMin:"c3-chart-arcs-gauge-min",selectedCircle:"c3-selected-circle",selectedCircles:"c3-selected-circles",eventRect:"c3-event-rect",eventRects:"c3-event-rects",eventRectsSingle:"c3-event-rects-single",eventRectsMultiple:"c3-event-rects-multiple",zoomRect:"c3-zoom-rect",brush:"c3-brush",focused:"c3-focused",defocused:"c3-defocused",region:"c3-region",regions:"c3-regions",title:"c3-title",tooltipContainer:"c3-tooltip-container",tooltip:"c3-tooltip",tooltipName:"c3-tooltip-name",shape:"c3-shape",shapes:"c3-shapes",line:"c3-line",lines:"c3-lines",bar:"c3-bar",bars:"c3-bars",circle:"c3-circle",circles:"c3-circles",arc:"c3-arc",arcs:"c3-arcs",area:"c3-area",areas:"c3-areas",empty:"c3-empty",text:"c3-text",texts:"c3-texts",gaugeValue:"c3-gauge-value",grid:"c3-grid",gridLines:"c3-grid-lines",xgrid:"c3-xgrid",xgrids:"c3-xgrids",xgridLine:"c3-xgrid-line",xgridLines:"c3-xgrid-lines",xgridFocus:"c3-xgrid-focus",ygrid:"c3-ygrid",ygrids:"c3-ygrids",ygridLine:"c3-ygrid-line",ygridLines:"c3-ygrid-lines",axis:"c3-axis",axisX:"c3-axis-x",axisXLabel:"c3-axis-x-label",axisY:"c3-axis-y",axisYLabel:"c3-axis-y-label",axisY2:"c3-axis-y2",axisY2Label:"c3-axis-y2-label",legendBackground:"c3-legend-background",legendItem:"c3-legend-item",legendItemEvent:"c3-legend-item-event",legendItemTile:"c3-legend-item-tile",legendItemHidden:"c3-legend-item-hidden",legendItemFocused:"c3-legend-item-focused",dragarea:"c3-dragarea",EXPANDED:"_expanded_",SELECTED:"_selected_",INCLUDED:"_included_"},o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},s=function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")},c=function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not 
"+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)},d=function(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e},l=function(t){return t||0===t},u=function(t){return"function"==typeof t},h=function(t){return Array.isArray(t)},g=function(t){return"string"==typeof t},f=function(t){return void 0===t},p=function(t){return void 0!==t},_=function(t){return 10*Math.ceil(t/10)},x=function(t){return Math.ceil(t)+.5},m=function(t){return t[1]-t[0]},y=function(t){return void 0===t||null===t||g(t)&&0===t.length||"object"===(void 0===t?"undefined":o(t))&&0===Object.keys(t).length},S=function(t){return!C.isEmpty(t)},w=function(t,e,i){return void 0!==t[e]?t[e]:i},v=function(t,e){var i=!1;return Object.keys(t).forEach(function(n){t[n]===e&&(i=!0)}),i},b=function(t){return"string"==typeof t?t.replace(//g,">"):t},T=function(t){var e=t.getBoundingClientRect(),i=[t.pathSegList.getItem(0),t.pathSegList.getItem(1)];return{x:i[0].x,y:Math.min(i[0].y,i[1].y),width:e.width,height:e.height}};(a=t.prototype).axisX=function(t,e,i){t.attr("transform",function(t){return"translate("+Math.ceil(e(t)+i)+", 0)"})},a.axisY=function(t,e){t.attr("transform",function(t){return"translate(0,"+Math.ceil(e(t))+")"})},a.scaleExtent=function(t){var e=t[0],i=t[t.length-1];return e0&&a[0]>0&&a.unshift(a[0]-(a[1]-a[0])),a},a.copyScale=function(){var t,e=this,i=e.scale.copy();return e.params.isCategory&&(t=e.scale.domain(),i.domain([t[0],t[1]-1])),i},a.textFormatted=function(t){var e=this,i=e.tickFormat?e.tickFormat(t):t;return void 0!==i?i:""},a.updateRange=function(){var t=this;return t.range=t.scale.rangeExtent?t.scale.rangeExtent():t.scaleExtent(t.scale.range()),t.range},a.updateTickTextCharSize=function(t){var e=this;if(e.tickTextCharSize)return e.tickTextCharSize;var i={h:11.5,w:5.5};return t.select("text").text(function(t){return e.textFormatted(t)}).each(function(t){var n=this.getBoundingClientRect(),a=e.textFormatted(t),r=n.height,o=a?n.width/a.length:void 0;r&&o&&(i.h=r,i.w=o)}).text(""),e.tickTextCharSize=i,i},a.transitionise=function(t){return this.params.withoutTransition?t:this.d3.transition(t)},a.isVertical=function(){return"left"===this.orient||"right"===this.orient},a.tspanData=function(t,e,i,n){var a=this,r=a.params.tickMultiline?a.splitTickText(t,i,n):[].concat(a.textFormatted(t));return r.map(function(t){return{index:e,splitted:t,length:r.length}})},a.splitTickText=function(t,e,i){function n(t,e){r=void 0;for(var i=1;i0?1:-1):t.tickLength},a.textTransform=function(){var t=this.tickTextRotate;return t?"rotate("+t+")":""},a.textTextAnchor=function(){var t=this.tickTextRotate;return t?t>0?"start":"end":"middle"},a.tspanDx=function(){var t=this.tickTextRotate;return t?8*Math.sin(Math.PI*(t/180)):0},a.tspanDy=function(t,e){var i=this,n=i.tickTextCharSize.h;return 0===e&&(n=i.isVertical()?-((t.length-1)*(i.tickTextCharSize.h/2)-3):".71em"),n},a.generateAxis=function(){function t(a){a.each(function(){var 
a,r,o,s=t.g=i.select(this),c=this.__chart__||e.scale,d=this.__chart__=e.copyScale(),l=e.tickValues?e.tickValues:e.generateTicks(d),u=s.selectAll(".tick").data(l,d),h=u.enter().insert("g",".domain").attr("class","tick").style("opacity",1e-6),g=u.exit().remove(),f=e.transitionise(u).style("opacity",1);n.isCategory?(e.tickOffset=Math.ceil((d(1)-d(0))/2),r=e.tickCentered?0:e.tickOffset,o=e.tickCentered?e.tickOffset:0):e.tickOffset=r=0,h.append("line"),h.append("text"),e.updateRange(),e.updateTickLength(),e.updateTickTextCharSize(s.select(".tick"));var p=f.select("line"),_=f.select("text"),x=u.select("text").selectAll("tspan").data(function(t,i){return e.tspanData(t,i,l,d)});x.enter().append("tspan"),x.exit().remove(),x.text(function(t){return t.splitted});var m=s.selectAll(".domain").data([0]),y=(m.enter().append("path").attr("class","domain"),e.transitionise(m));switch(e.orient){case"bottom":a=e.axisX,p.attr("x1",r).attr("x2",r).attr("y2",function(t,i){return e.lineY2(t,i)}),_.attr("x",0).attr("y",function(t,i){return e.textY(t,i)}).attr("transform",function(t,i){return e.textTransform(t,i)}).style("text-anchor",function(t,i){return e.textTextAnchor(t,i)}),x.attr("x",0).attr("dy",function(t,i){return e.tspanDy(t,i)}).attr("dx",function(t,i){return e.tspanDx(t,i)}),y.attr("d","M"+e.range[0]+","+e.outerTickSize+"V0H"+e.range[1]+"V"+e.outerTickSize);break;case"top":a=e.axisX,p.attr("x2",0).attr("y2",-e.innerTickSize),_.attr("x",0).attr("y",-e.tickLength).style("text-anchor","middle"),x.attr("x",0).attr("dy","0em"),y.attr("d","M"+e.range[0]+","+-e.outerTickSize+"V0H"+e.range[1]+"V"+-e.outerTickSize);break;case"left":a=e.axisY,p.attr("x2",-e.innerTickSize).attr("y1",o).attr("y2",o),_.attr("x",-e.tickLength).attr("y",e.tickOffset).style("text-anchor","end"),x.attr("x",-e.tickLength).attr("dy",function(t,i){return e.tspanDy(t,i)}),y.attr("d","M"+-e.outerTickSize+","+e.range[0]+"H0V"+e.range[1]+"H"+-e.outerTickSize);break;case"right":a=e.axisY,p.attr("x2",e.innerTickSize).attr("y2",0),_.attr("x",e.tickLength).attr("y",0).style("text-anchor","start"),x.attr("x",e.tickLength).attr("dy",function(t,i){return e.tspanDy(t,i)}),y.attr("d","M"+e.outerTickSize+","+e.range[0]+"H0V"+e.range[1]+"H"+e.outerTickSize)}if(d.rangeBand){var S=d,w=S.rangeBand()/2;c=d=function(t){return S(t)+w}}else c.rangeBand?c=d:g.call(a,d,e.tickOffset);h.call(a,c,e.tickOffset),f.call(a,d,e.tickOffset)})}var e=this,i=e.d3,n=e.params;return t.scale=function(i){return arguments.length?(e.scale=i,t):e.scale},t.orient=function(i){return arguments.length?(e.orient=i in{top:1,right:1,bottom:1,left:1}?i+"":"bottom",t):e.orient},t.tickFormat=function(i){return arguments.length?(e.tickFormat=i,t):e.tickFormat},t.tickCentered=function(i){return arguments.length?(e.tickCentered=i,t):e.tickCentered},t.tickOffset=function(){return e.tickOffset},t.tickInterval=function(){var i;return i=n.isCategory?2*e.tickOffset:(t.g.select("path.domain").node().getTotalLength()-2*e.outerTickSize)/t.g.selectAll("line").size(),i===1/0?0:i},t.ticks=function(){return arguments.length?(e.tickArguments=arguments,t):e.tickArguments},t.tickCulling=function(i){return arguments.length?(e.tickCulling=i,t):e.tickCulling},t.tickValues=function(i){if("function"==typeof i)e.tickValues=function(){return i(e.scale.domain())};else{if(!arguments.length)return e.tickValues;e.tickValues=i}return t},t};var A=function(e){function i(e){s(this,i);var r={fn:n,internal:{fn:a}},o=d(this,(i.__proto__||Object.getPrototypeOf(i)).call(this,e,"axis",r));return o.d3=e.d3,o.internal=t,o}return 
c(i,e),i}(function(t,e,i){this.owner=t,L.chart.internal[e]=i});(n=A.prototype).init=function(){var t=this.owner,e=t.config,i=t.main;t.axes.x=i.append("g").attr("class",r.axis+" "+r.axisX).attr("clip-path",t.clipPathForXAxis).attr("transform",t.getTranslate("x")).style("visibility",e.axis_x_show?"visible":"hidden"),t.axes.x.append("text").attr("class",r.axisXLabel).attr("transform",e.axis_rotated?"rotate(-90)":"").style("text-anchor",this.textAnchorForXAxisLabel.bind(this)),t.axes.y=i.append("g").attr("class",r.axis+" "+r.axisY).attr("clip-path",e.axis_y_inner?"":t.clipPathForYAxis).attr("transform",t.getTranslate("y")).style("visibility",e.axis_y_show?"visible":"hidden"),t.axes.y.append("text").attr("class",r.axisYLabel).attr("transform",e.axis_rotated?"":"rotate(-90)").style("text-anchor",this.textAnchorForYAxisLabel.bind(this)),t.axes.y2=i.append("g").attr("class",r.axis+" "+r.axisY2).attr("transform",t.getTranslate("y2")).style("visibility",e.axis_y2_show?"visible":"hidden"),t.axes.y2.append("text").attr("class",r.axisY2Label).attr("transform",e.axis_rotated?"":"rotate(-90)").style("text-anchor",this.textAnchorForY2AxisLabel.bind(this))},n.getXAxis=function(t,e,i,n,a,r,o){var s=this.owner,c=s.config,d={isCategory:s.isCategorized(),withOuterTick:a,tickMultiline:c.axis_x_tick_multiline,tickWidth:c.axis_x_tick_width,tickTextRotate:o?0:c.axis_x_tick_rotate,withoutTransition:r},l=new this.internal(this,d).axis.scale(t).orient(e);return s.isTimeSeries()&&n&&"function"!=typeof n&&(n=n.map(function(t){return s.parseDate(t)})),l.tickFormat(i).tickValues(n),s.isCategorized()&&(l.tickCentered(c.axis_x_tick_centered),y(c.axis_x_tick_culling)&&(c.axis_x_tick_culling=!1)),l},n.updateXAxisTickValues=function(t,e){var i,n=this.owner,a=n.config;return(a.axis_x_tick_fit||a.axis_x_tick_count)&&(i=this.generateTickValues(n.mapTargetsToUniqueXs(t),a.axis_x_tick_count,n.isTimeSeries())),e?e.tickValues(i):(n.xAxis.tickValues(i),n.subXAxis.tickValues(i)),i},n.getYAxis=function(t,e,i,n,a,r,o){var s=this.owner,c=s.config,d={withOuterTick:a,withoutTransition:r,tickTextRotate:o?0:c.axis_y_tick_rotate},l=new this.internal(this,d).axis.scale(t).orient(e).tickFormat(i);return s.isTimeSeriesY()?l.ticks(s.d3.time[c.axis_y_tick_time_value],c.axis_y_tick_time_interval):l.tickValues(n),l},n.getId=function(t){var e=this.owner.config;return t in e.data_axes?e.data_axes[t]:"y"},n.getXAxisTickFormat=function(){var t=this.owner,e=t.config,i=t.isTimeSeries()?t.defaultAxisTimeFormat:t.isCategorized()?t.categoryName:function(t){return t<0?t.toFixed(0):t};return e.axis_x_tick_format&&(u(e.axis_x_tick_format)?i=e.axis_x_tick_format:t.isTimeSeries()&&(i=function(i){return i?t.axisTimeFormat(e.axis_x_tick_format)(i):""})),u(i)?function(e){return i.call(t,e)}:i},n.getTickValues=function(t,e){return t||(e?e.tickValues():void 0)},n.getXAxisTickValues=function(){return this.getTickValues(this.owner.config.axis_x_tick_values,this.owner.xAxis)},n.getYAxisTickValues=function(){return this.getTickValues(this.owner.config.axis_y_tick_values,this.owner.yAxis)},n.getY2AxisTickValues=function(){return this.getTickValues(this.owner.config.axis_y2_tick_values,this.owner.y2Axis)},n.getLabelOptionByAxisId=function(t){var e,i=this.owner.config;return"y"===t?e=i.axis_y_label:"y2"===t?e=i.axis_y2_label:"x"===t&&(e=i.axis_x_label),e},n.getLabelText=function(t){var e=this.getLabelOptionByAxisId(t);return g(e)?e:e?e.text:null},n.setLabelText=function(t,e){var 
i=this.owner.config,n=this.getLabelOptionByAxisId(t);g(n)?"y"===t?i.axis_y_label=e:"y2"===t?i.axis_y2_label=e:"x"===t&&(i.axis_x_label=e):n&&(n.text=e)},n.getLabelPosition=function(t,e){var i=this.getLabelOptionByAxisId(t),n=i&&"object"===(void 0===i?"undefined":o(i))&&i.position?i.position:e;return{isInner:n.indexOf("inner")>=0,isOuter:n.indexOf("outer")>=0,isLeft:n.indexOf("left")>=0,isCenter:n.indexOf("center")>=0,isRight:n.indexOf("right")>=0,isTop:n.indexOf("top")>=0,isMiddle:n.indexOf("middle")>=0,isBottom:n.indexOf("bottom")>=0}},n.getXAxisLabelPosition=function(){return this.getLabelPosition("x",this.owner.config.axis_rotated?"inner-top":"inner-right")},n.getYAxisLabelPosition=function(){return this.getLabelPosition("y",this.owner.config.axis_rotated?"inner-right":"inner-top")},n.getY2AxisLabelPosition=function(){return this.getLabelPosition("y2",this.owner.config.axis_rotated?"inner-right":"inner-top")},n.getLabelPositionById=function(t){return"y2"===t?this.getY2AxisLabelPosition():"y"===t?this.getYAxisLabelPosition():this.getXAxisLabelPosition()},n.textForXAxisLabel=function(){return this.getLabelText("x")},n.textForYAxisLabel=function(){return this.getLabelText("y")},n.textForY2AxisLabel=function(){return this.getLabelText("y2")},n.xForAxisLabel=function(t,e){var i=this.owner;return t?e.isLeft?0:e.isCenter?i.width/2:i.width:e.isBottom?-i.height:e.isMiddle?-i.height/2:0},n.dxForAxisLabel=function(t,e){return t?e.isLeft?"0.5em":e.isRight?"-0.5em":"0":e.isTop?"-0.5em":e.isBottom?"0.5em":"0"},n.textAnchorForAxisLabel=function(t,e){return t?e.isLeft?"start":e.isCenter?"middle":"end":e.isBottom?"start":e.isMiddle?"middle":"end"},n.xForXAxisLabel=function(){return this.xForAxisLabel(!this.owner.config.axis_rotated,this.getXAxisLabelPosition())},n.xForYAxisLabel=function(){return this.xForAxisLabel(this.owner.config.axis_rotated,this.getYAxisLabelPosition())},n.xForY2AxisLabel=function(){return this.xForAxisLabel(this.owner.config.axis_rotated,this.getY2AxisLabelPosition())},n.dxForXAxisLabel=function(){return this.dxForAxisLabel(!this.owner.config.axis_rotated,this.getXAxisLabelPosition())},n.dxForYAxisLabel=function(){return this.dxForAxisLabel(this.owner.config.axis_rotated,this.getYAxisLabelPosition())},n.dxForY2AxisLabel=function(){return this.dxForAxisLabel(this.owner.config.axis_rotated,this.getY2AxisLabelPosition())},n.dyForXAxisLabel=function(){var t=this.owner.config,e=this.getXAxisLabelPosition();return t.axis_rotated?e.isInner?"1.2em":-25-this.getMaxTickWidth("x"):e.isInner?"-0.5em":t.axis_x_height?t.axis_x_height-10:"3em"},n.dyForYAxisLabel=function(){var t=this.owner,e=this.getYAxisLabelPosition();return t.config.axis_rotated?e.isInner?"-0.5em":"3em":e.isInner?"1.2em":-10-(t.config.axis_y_inner?0:this.getMaxTickWidth("y")+10)},n.dyForY2AxisLabel=function(){var t=this.owner,e=this.getY2AxisLabelPosition();return t.config.axis_rotated?e.isInner?"1.2em":"-2.2em":e.isInner?"-0.5em":15+(t.config.axis_y2_inner?0:this.getMaxTickWidth("y2")+15)},n.textAnchorForXAxisLabel=function(){var t=this.owner;return this.textAnchorForAxisLabel(!t.config.axis_rotated,this.getXAxisLabelPosition())},n.textAnchorForYAxisLabel=function(){var t=this.owner;return this.textAnchorForAxisLabel(t.config.axis_rotated,this.getYAxisLabelPosition())},n.textAnchorForY2AxisLabel=function(){var t=this.owner;return this.textAnchorForAxisLabel(t.config.axis_rotated,this.getY2AxisLabelPosition())},n.getMaxTickWidth=function(t,e){var i,n,a,r,o=this.owner,s=o.config,c=0;return 
e&&o.currentMaxTickWidths[t]?o.currentMaxTickWidths[t]:(o.svg&&(i=o.filterTargetsToShow(o.data.targets),"y"===t?(n=o.y.copy().domain(o.getYDomain(i,"y")),a=this.getYAxis(n,o.yOrient,s.axis_y_tick_format,o.yAxisTickValues,!1,!0,!0)):"y2"===t?(n=o.y2.copy().domain(o.getYDomain(i,"y2")),a=this.getYAxis(n,o.y2Orient,s.axis_y2_tick_format,o.y2AxisTickValues,!1,!0,!0)):(n=o.x.copy().domain(o.getXDomain(i)),a=this.getXAxis(n,o.xOrient,o.xAxisTickFormat,o.xAxisTickValues,!1,!0,!0),this.updateXAxisTickValues(i,a)),(r=o.d3.select("body").append("div").classed("c3",!0)).append("svg").style("visibility","hidden").style("position","fixed").style("top",0).style("left",0).append("g").call(a).each(function(){o.d3.select(this).selectAll("text").each(function(){var t=this.getBoundingClientRect();c2){for(o=n-2,a=t[0],s=((r=t[t.length-1])-a)/(o+1),l=[a],c=0;c=0&&k.select(this).style("display",e%V?"none":"block")})}else I.svg.selectAll("."+r.axisX+" .tick text").style("display","block");_=I.generateDrawArea?I.generateDrawArea(F,!1):void 0,x=I.generateDrawBar?I.generateDrawBar(X):void 0,m=I.generateDrawLine?I.generateDrawLine(M,!1):void 0,y=I.generateXYForText(F,X,M,!0),S=I.generateXYForText(F,X,M,!1),i&&(I.subY.domain(I.getYDomain(H,"y")),I.subY2.domain(I.getYDomain(H,"y2"))),I.updateXgridFocus(),R.select("text."+r.text+"."+r.empty).attr("x",I.width/2).attr("y",I.height/2).text(D.data_empty_label_text).transition().style("opacity",H.length?0:1),I.updateGrid(v),I.updateRegion(v),I.updateBar(b),I.updateLine(b),I.updateArea(b),I.updateCircle(),I.hasDataLabel()&&I.updateText(b),I.redrawTitle&&I.redrawTitle(),I.redrawArc&&I.redrawArc(v,b,c),I.redrawSubchart&&I.redrawSubchart(n,e,v,b,F,X,M),R.selectAll("."+r.selectedCircles).filter(I.isBarType.bind(I)).selectAll("circle").remove(),D.interaction_enabled&&!t.flow&&g&&(I.redrawEventRect(),I.updateZoom&&I.updateZoom()),I.updateCircleY(),E=(I.config.axis_rotated?I.circleY:I.circleX).bind(I),O=(I.config.axis_rotated?I.circleX:I.circleY).bind(I),t.flow&&(P=I.generateFlow({targets:H,flow:t.flow,duration:t.flow.duration,drawBar:x,drawLine:m,drawArea:_,cx:E,cy:O,xv:B,xForText:y,yForText:S})),(v||P)&&I.isTabVisible()?k.transition().duration(v).each(function(){var e=[];[I.redrawBar(x,!0),I.redrawLine(m,!0),I.redrawArea(_,!0),I.redrawCircle(E,O,!0),I.redrawText(y,S,t.flow,!0),I.redrawRegion(!0),I.redrawGrid(!0)].forEach(function(t){t.forEach(function(t){e.push(t)})}),A=I.generateWait(),e.forEach(function(t){A.add(t)})}).call(A,function(){P&&P(),D.onrendered&&D.onrendered.call(I)}):(I.redrawBar(x),I.redrawLine(m),I.redrawArea(_),I.redrawCircle(E,O),I.redrawText(y,S,t.flow),I.redrawRegion(),I.redrawGrid(),D.onrendered&&D.onrendered.call(I)),I.mapToIds(I.data.targets).forEach(function(t){I.withoutFadeIn[t]=!0})},C.updateAndRedraw=function(t){var e,i=this,n=i.config;(t=t||{}).withTransition=w(t,"withTransition",!0),t.withTransform=w(t,"withTransform",!1),t.withLegend=w(t,"withLegend",!1),t.withUpdateXDomain=!0,t.withUpdateOrgXDomain=!0,t.withTransitionForExit=!1,t.withTransitionForTransform=w(t,"withTransitionForTransform",t.withTransition),i.updateSizes(),t.withLegend&&n.legend_show||(e=i.axis.generateTransitions(t.withTransitionForAxis?n.transition_duration:0),i.updateScales(),i.updateSvgSize(),i.transformAll(t.withTransitionForTransform,e)),i.redraw(t,e)},C.redrawWithoutRescale=function(){this.redraw({withY:!1,withSubchart:!1,withEventRect:!1,withTransitionForAxis:!1})},C.isTimeSeries=function(){return"timeseries"===this.config.axis_x_type},C.isCategorized=function(){return 
this.config.axis_x_type.indexOf("categor")>=0},C.isCustomX=function(){var t=this,e=t.config;return!t.isTimeSeries()&&(e.data_x||S(e.data_xs))},C.isTimeSeriesY=function(){return"timeseries"===this.config.axis_y_type},C.getTranslate=function(t){var e,i,n=this,a=n.config;return"main"===t?(e=x(n.margin.left),i=x(n.margin.top)):"context"===t?(e=x(n.margin2.left),i=x(n.margin2.top)):"legend"===t?(e=n.margin3.left,i=n.margin3.top):"x"===t?(e=0,i=a.axis_rotated?0:n.height):"y"===t?(e=0,i=a.axis_rotated?n.height:0):"y2"===t?(e=a.axis_rotated?0:n.width,i=a.axis_rotated?1:0):"subx"===t?(e=0,i=a.axis_rotated?0:n.height2):"arc"===t&&(e=n.arcWidth/2,i=n.arcHeight/2),"translate("+e+","+i+")"},C.initialOpacity=function(t){return null!==t.value&&this.withoutFadeIn[t.id]?1:0},C.initialOpacityForCircle=function(t){return null!==t.value&&this.withoutFadeIn[t.id]?this.opacityForCircle(t):0},C.opacityForCircle=function(t){var e=(u(this.config.point_show)?this.config.point_show(t):this.config.point_show)?1:0;return l(t.value)?this.isScatterType(t)?.5:e:0},C.opacityForText=function(){return this.hasDataLabel()?1:0},C.xx=function(t){return t?this.x(t.x):null},C.xv=function(t){var e=this,i=t.value;return e.isTimeSeries()?i=e.parseDate(t.value):e.isCategorized()&&"string"==typeof t.value&&(i=e.config.axis_x_categories.indexOf(t.value)),Math.ceil(e.x(i))},C.yv=function(t){var e=this,i=t.axis&&"y2"===t.axis?e.y2:e.y;return Math.ceil(i(t.value))},C.subxx=function(t){return t?this.subX(t.x):null},C.transformMain=function(t,e){var i,n,a,o=this;e&&e.axisX?i=e.axisX:(i=o.main.select("."+r.axisX),t&&(i=i.transition())),e&&e.axisY?n=e.axisY:(n=o.main.select("."+r.axisY),t&&(n=n.transition())),e&&e.axisY2?a=e.axisY2:(a=o.main.select("."+r.axisY2),t&&(a=a.transition())),(t?o.main.transition():o.main).attr("transform",o.getTranslate("main")),i.attr("transform",o.getTranslate("x")),n.attr("transform",o.getTranslate("y")),a.attr("transform",o.getTranslate("y2")),o.main.select("."+r.chartArcs).attr("transform",o.getTranslate("arc"))},C.transformAll=function(t,e){var i=this;i.transformMain(t,e),i.config.subchart_show&&i.transformContext(t,e),i.legend&&i.transformLegend(t)},C.updateSvgSize=function(){var t=this,e=t.svg.select(".c3-brush .background");t.svg.attr("width",t.currentWidth).attr("height",t.currentHeight),t.svg.selectAll(["#"+t.clipId,"#"+t.clipIdForGrid]).select("rect").attr("width",t.width).attr("height",t.height),t.svg.select("#"+t.clipIdForXAxis).select("rect").attr("x",t.getXAxisClipX.bind(t)).attr("y",t.getXAxisClipY.bind(t)).attr("width",t.getXAxisClipWidth.bind(t)).attr("height",t.getXAxisClipHeight.bind(t)),t.svg.select("#"+t.clipIdForYAxis).select("rect").attr("x",t.getYAxisClipX.bind(t)).attr("y",t.getYAxisClipY.bind(t)).attr("width",t.getYAxisClipWidth.bind(t)).attr("height",t.getYAxisClipHeight.bind(t)),t.svg.select("#"+t.clipIdForSubchart).select("rect").attr("width",t.width).attr("height",e.size()?e.attr("height"):0),t.svg.select("."+r.zoomRect).attr("width",t.width).attr("height",t.height),t.selectChart.style("max-height",t.currentHeight+"px")},C.updateDimension=function(t){var e=this;t||(e.config.axis_rotated?(e.axes.x.call(e.xAxis),e.axes.subx.call(e.subXAxis)):(e.axes.y.call(e.yAxis),e.axes.y2.call(e.y2Axis))),e.updateSizes(),e.updateScales(),e.updateSvgSize(),e.transformAll(!1)},C.observeInserted=function(t){var e,i=this;"undefined"!=typeof MutationObserver?(e=new 
MutationObserver(function(n){n.forEach(function(n){"childList"===n.type&&n.previousSibling&&(e.disconnect(),i.intervalForObserveInserted=window.setInterval(function(){t.node().parentNode&&(window.clearInterval(i.intervalForObserveInserted),i.updateDimension(),i.brush&&i.brush.update(),i.config.oninit.call(i),i.redraw({withTransform:!0,withUpdateXDomain:!0,withUpdateOrgXDomain:!0,withTransition:!1,withTransitionForTransform:!1,withLegend:!0}),t.transition().style("opacity",1))},10))})})).observe(t.node(),{attributes:!0,childList:!0,characterData:!0}):window.console.error("MutationObserver not defined.")},C.bindResize=function(){var t=this,e=t.config;if(t.resizeFunction=t.generateResize(),t.resizeFunction.add(function(){e.onresize.call(t)}),e.resize_auto&&t.resizeFunction.add(function(){void 0!==t.resizeTimeout&&window.clearTimeout(t.resizeTimeout),t.resizeTimeout=window.setTimeout(function(){delete t.resizeTimeout,t.api.flush()},100)}),t.resizeFunction.add(function(){e.onresized.call(t)}),window.attachEvent)window.attachEvent("onresize",t.resizeFunction);else if(window.addEventListener)window.addEventListener("resize",t.resizeFunction,!1);else{var i=window.onresize;i?i.add&&i.remove||(i=t.generateResize()).add(window.onresize):i=t.generateResize(),i.add(t.resizeFunction),window.onresize=i}},C.generateResize=function(){function t(){e.forEach(function(t){t()})}var e=[];return t.add=function(t){e.push(t)},t.remove=function(t){for(var i=0;ie.getTotalLength())break;i--}while(i>0);return i})),"SVGPathSegList"in window||(window.SVGPathSegList=function(t){this._pathElement=t,this._list=this._parsePath(this._pathElement.getAttribute("d")),this._mutationObserverConfig={attributes:!0,attributeFilter:["d"]},this._pathElementMutationObserver=new MutationObserver(this._updateListFromPathMutations.bind(this)),this._pathElementMutationObserver.observe(this._pathElement,this._mutationObserverConfig)},window.SVGPathSegList.prototype.classname="SVGPathSegList",Object.defineProperty(window.SVGPathSegList.prototype,"numberOfItems",{get:function(){return this._checkPathSynchronizedToList(),this._list.length},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"pathSegList",{get:function(){return this._pathSegList||(this._pathSegList=new window.SVGPathSegList(this)),this._pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"normalizedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"animatedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),Object.defineProperty(window.SVGPathElement.prototype,"animatedNormalizedPathSegList",{get:function(){return this.pathSegList},enumerable:!0}),window.SVGPathSegList.prototype._checkPathSynchronizedToList=function(){this._updateListFromPathMutations(this._pathElementMutationObserver.takeRecords())},window.SVGPathSegList.prototype._updateListFromPathMutations=function(t){if(this._pathElement){var 
e=!1;t.forEach(function(t){"d"==t.attributeName&&(e=!0)}),e&&(this._list=this._parsePath(this._pathElement.getAttribute("d")))}},window.SVGPathSegList.prototype._writeListToPath=function(){this._pathElementMutationObserver.disconnect(),this._pathElement.setAttribute("d",window.SVGPathSegList._pathSegArrayAsString(this._list)),this._pathElementMutationObserver.observe(this._pathElement,this._mutationObserverConfig)},window.SVGPathSegList.prototype.segmentChanged=function(t){this._writeListToPath()},window.SVGPathSegList.prototype.clear=function(){this._checkPathSynchronizedToList(),this._list.forEach(function(t){t._owningPathSegList=null}),this._list=[],this._writeListToPath()},window.SVGPathSegList.prototype.initialize=function(t){return this._checkPathSynchronizedToList(),this._list=[t],t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype._checkValidIndex=function(t){if(isNaN(t)||t<0||t>=this.numberOfItems)throw"INDEX_SIZE_ERR"},window.SVGPathSegList.prototype.getItem=function(t){return this._checkPathSynchronizedToList(),this._checkValidIndex(t),this._list[t]},window.SVGPathSegList.prototype.insertItemBefore=function(t,e){return this._checkPathSynchronizedToList(),e>this.numberOfItems&&(e=this.numberOfItems),t._owningPathSegList&&(t=t.clone()),this._list.splice(e,0,t),t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype.replaceItem=function(t,e){return this._checkPathSynchronizedToList(),t._owningPathSegList&&(t=t.clone()),this._checkValidIndex(e),this._list[e]=t,t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList.prototype.removeItem=function(t){this._checkPathSynchronizedToList(),this._checkValidIndex(t);var e=this._list[t];return this._list.splice(t,1),this._writeListToPath(),e},window.SVGPathSegList.prototype.appendItem=function(t){return this._checkPathSynchronizedToList(),t._owningPathSegList&&(t=t.clone()),this._list.push(t),t._owningPathSegList=this,this._writeListToPath(),t},window.SVGPathSegList._pathSegArrayAsString=function(t){var e="",i=!0;return t.forEach(function(t){i?(i=!1,e+=t._asPathString()):e+=" "+t._asPathString()}),e},window.SVGPathSegList.prototype._parsePath=function(t){if(!t||0==t.length)return[];var e=this,i=function(){this.pathSegList=[]};i.prototype.appendSegment=function(t){this.pathSegList.push(t)};var n=function(t){this._string=t,this._currentIndex=0,this._endIndex=this._string.length,this._previousCommand=window.SVGPathSeg.PATHSEG_UNKNOWN,this._skipOptionalSpaces()};n.prototype._isCurrentSpace=function(){var t=this._string[this._currentIndex];return t<=" "&&(" "==t||"\n"==t||"\t"==t||"\r"==t||"\f"==t)},n.prototype._skipOptionalSpaces=function(){for(;this._currentIndex="0"&&t<="9")&&e!=window.SVGPathSeg.PATHSEG_CLOSEPATH?e==window.SVGPathSeg.PATHSEG_MOVETO_ABS?window.SVGPathSeg.PATHSEG_LINETO_ABS:e==window.SVGPathSeg.PATHSEG_MOVETO_REL?window.SVGPathSeg.PATHSEG_LINETO_REL:e:window.SVGPathSeg.PATHSEG_UNKNOWN},n.prototype.initialCommandIsMoveTo=function(){if(!this.hasMoreData())return!0;var t=this.peekSegmentType();return t==window.SVGPathSeg.PATHSEG_MOVETO_ABS||t==window.SVGPathSeg.PATHSEG_MOVETO_REL},n.prototype._parseNumber=function(){var t=0,e=0,i=1,n=0,a=1,r=1,o=this._currentIndex;if(this._skipOptionalSpaces(),this._currentIndex"9")&&"."!=this._string.charAt(this._currentIndex))){for(var s=this._currentIndex;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)this._currentIndex++;if(this._currentIndex!=s)for(var 
c=this._currentIndex-1,d=1;c>=s;)e+=d*(this._string.charAt(c--)-"0"),d*=10;if(this._currentIndex=this._endIndex||this._string.charAt(this._currentIndex)<"0"||this._string.charAt(this._currentIndex)>"9")return;for(;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)i*=10,n+=(this._string.charAt(this._currentIndex)-"0")/i,this._currentIndex+=1}if(this._currentIndex!=o&&this._currentIndex+1=this._endIndex||this._string.charAt(this._currentIndex)<"0"||this._string.charAt(this._currentIndex)>"9")return;for(;this._currentIndex="0"&&this._string.charAt(this._currentIndex)<="9";)t*=10,t+=this._string.charAt(this._currentIndex)-"0",this._currentIndex++}var l=e+n;if(l*=a,t&&(l*=Math.pow(10,r*t)),o!=this._currentIndex)return this._skipOptionalSpacesOrDelimiter(),l}},n.prototype._parseArcFlag=function(){if(!(this._currentIndex>=this._endIndex)){var t=!1,e=this._string.charAt(this._currentIndex++);if("0"==e)t=!1;else{if("1"!=e)return;t=!0}return this._skipOptionalSpacesOrDelimiter(),t}},n.prototype.parseSegment=function(){var t=this._string[this._currentIndex],i=this._pathSegTypeFromChar(t);if(i==window.SVGPathSeg.PATHSEG_UNKNOWN){if(this._previousCommand==window.SVGPathSeg.PATHSEG_UNKNOWN)return null;if((i=this._nextCommandHelper(t,this._previousCommand))==window.SVGPathSeg.PATHSEG_UNKNOWN)return null}else this._currentIndex++;switch(this._previousCommand=i,i){case window.SVGPathSeg.PATHSEG_MOVETO_REL:return new window.SVGPathSegMovetoRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_MOVETO_ABS:return new window.SVGPathSegMovetoAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_REL:return new window.SVGPathSegLinetoRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_ABS:return new window.SVGPathSegLinetoAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_HORIZONTAL_REL:return new window.SVGPathSegLinetoHorizontalRel(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_HORIZONTAL_ABS:return new window.SVGPathSegLinetoHorizontalAbs(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_VERTICAL_REL:return new window.SVGPathSegLinetoVerticalRel(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_LINETO_VERTICAL_ABS:return new window.SVGPathSegLinetoVerticalAbs(e,this._parseNumber());case window.SVGPathSeg.PATHSEG_CLOSEPATH:return this._skipOptionalSpaces(),new window.SVGPathSegClosePath(e);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_REL:return n={x1:this._parseNumber(),y1:this._parseNumber(),x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicRel(e,n.x,n.y,n.x1,n.y1,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_ABS:return n={x1:this._parseNumber(),y1:this._parseNumber(),x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicAbs(e,n.x,n.y,n.x1,n.y1,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_SMOOTH_REL:return n={x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicSmoothRel(e,n.x,n.y,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_CUBIC_SMOOTH_ABS:return n={x2:this._parseNumber(),y2:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoCubicSmoothAbs(e,n.x,n.y,n.x2,n.y2);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_REL:return 
n={x1:this._parseNumber(),y1:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoQuadraticRel(e,n.x,n.y,n.x1,n.y1);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_ABS:return n={x1:this._parseNumber(),y1:this._parseNumber(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegCurvetoQuadraticAbs(e,n.x,n.y,n.x1,n.y1);case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_SMOOTH_REL:return new window.SVGPathSegCurvetoQuadraticSmoothRel(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_CURVETO_QUADRATIC_SMOOTH_ABS:return new window.SVGPathSegCurvetoQuadraticSmoothAbs(e,this._parseNumber(),this._parseNumber());case window.SVGPathSeg.PATHSEG_ARC_REL:return n={x1:this._parseNumber(),y1:this._parseNumber(),arcAngle:this._parseNumber(),arcLarge:this._parseArcFlag(),arcSweep:this._parseArcFlag(),x:this._parseNumber(),y:this._parseNumber()},new window.SVGPathSegArcRel(e,n.x,n.y,n.x1,n.y1,n.arcAngle,n.arcLarge,n.arcSweep);case window.SVGPathSeg.PATHSEG_ARC_ABS:var n={x1:this._parseNumber(),y1:this._parseNumber(),arcAngle:this._parseNumber(),arcLarge:this._parseArcFlag(),arcSweep:this._parseArcFlag(),x:this._parseNumber(),y:this._parseNumber()};return new window.SVGPathSegArcAbs(e,n.x,n.y,n.x1,n.y1,n.arcAngle,n.arcLarge,n.arcSweep);default:throw"Unknown path seg type."}};var a=new i,r=new n(t);if(!r.initialCommandIsMoveTo())return[];for(;r.hasMoreData();){var o=r.parseSegment();if(!o)return[];a.appendSegment(o)}return a.pathSegList}),P.axis=function(){},P.axis.labels=function(t){var e=this.internal;arguments.length&&(Object.keys(t).forEach(function(i){e.axis.setLabelText(i,t[i])}),e.axis.updateLabels())},P.axis.max=function(t){var e=this.internal,i=e.config;if(!arguments.length)return{x:i.axis_x_max,y:i.axis_y_max,y2:i.axis_y2_max};"object"===(void 0===t?"undefined":o(t))?(l(t.x)&&(i.axis_x_max=t.x),l(t.y)&&(i.axis_y_max=t.y),l(t.y2)&&(i.axis_y2_max=t.y2)):i.axis_y_max=i.axis_y2_max=t,e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})},P.axis.min=function(t){var e=this.internal,i=e.config;if(!arguments.length)return{x:i.axis_x_min,y:i.axis_y_min,y2:i.axis_y2_min};"object"===(void 0===t?"undefined":o(t))?(l(t.x)&&(i.axis_x_min=t.x),l(t.y)&&(i.axis_y_min=t.y),l(t.y2)&&(i.axis_y2_min=t.y2)):i.axis_y_min=i.axis_y2_min=t,e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})},P.axis.range=function(t){if(!arguments.length)return{max:this.axis.max(),min:this.axis.min()};void 0!==t.max&&this.axis.max(t.max),void 0!==t.min&&this.axis.min(t.min)},P.category=function(t,e){var i=this.internal,n=i.config;return arguments.length>1&&(n.axis_x_categories[t]=e,i.redraw()),n.axis_x_categories[t]},P.categories=function(t){var e=this.internal,i=e.config;return arguments.length?(i.axis_x_categories=t,e.redraw(),i.axis_x_categories):i.axis_x_categories},P.resize=function(t){var e=this.internal.config;e.size_width=t?t.width:null,e.size_height=t?t.height:null,this.flush()},P.flush=function(){this.internal.updateAndRedraw({withLegend:!0,withTransition:!1,withTransitionForTransform:!1})},P.destroy=function(){var t=this.internal;if(window.clearInterval(t.intervalForObserveInserted),void 0!==t.resizeTimeout&&window.clearTimeout(t.resizeTimeout),window.detachEvent)window.detachEvent("onresize",t.resizeFunction);else if(window.removeEventListener)window.removeEventListener("resize",t.resizeFunction);else{var e=window.onresize;e&&e.add&&e.remove&&e.remove(t.resizeFunction)}return 
t.selectChart.classed("c3",!1).html(""),Object.keys(t).forEach(function(e){t[e]=null}),null},P.color=function(t){return this.internal.color(t)},P.data=function(t){var e=this.internal.data.targets;return void 0===t?e:e.filter(function(e){return[].concat(t).indexOf(e.id)>=0})},P.data.shown=function(t){return this.internal.filterTargetsToShow(this.data(t))},P.data.values=function(t){var e,i=null;return t&&(i=(e=this.data(t))[0]?e[0].values.map(function(t){return t.value}):null),i},P.data.names=function(t){return this.internal.clearLegendItemTextBoxCache(),this.internal.updateDataAttributes("names",t)},P.data.colors=function(t){return this.internal.updateDataAttributes("colors",t)},P.data.axes=function(t){return this.internal.updateDataAttributes("axes",t)},P.flow=function(t){var e,i,n,a,r,o,s,c=this.internal,d=[],u=c.getMaxDataCount(),h=0,g=0;if(t.json)i=c.convertJsonToData(t.json,t.keys);else if(t.rows)i=c.convertRowsToData(t.rows);else{if(!t.columns)return;i=c.convertColumnsToData(t.columns)}e=c.convertDataToTargets(i,!0),c.data.targets.forEach(function(t){var i,n,a=!1;for(i=0;i1?a.values[a.values.length-1].x-r.x:r.x-c.getXDomain(c.data.targets)[0]:1,n=[r.x-o,r.x],c.updateXDomain(null,!0,!0,!1,n)),c.updateTargets(c.data.targets),c.redraw({flow:{index:r.index,length:h,duration:l(t.duration)?t.duration:c.config.transition_duration,done:t.done,orgDataCount:u},withLegend:!0,withTransition:u>1,withTrimXDomain:!1,withUpdateXAxis:!0})},C.generateFlow=function(t){var e=this,i=e.config,n=e.d3;return function(){var a,o,s,c=t.targets,d=t.flow,l=t.drawBar,u=t.drawLine,h=t.drawArea,g=t.cx,f=t.cy,p=t.xv,_=t.xForText,x=t.yForText,y=t.duration,S=1,w=d.index,v=d.length,b=e.getValueOnIndex(e.data.targets[0].values,w),T=e.getValueOnIndex(e.data.targets[0].values,w+v),A=e.x.domain(),P=d.duration||y,C=d.done||function(){},L=e.generateWait(),V=e.xgrid||n.selectAll([]),G=e.xgridLines||n.selectAll([]),E=e.mainRegion||n.selectAll([]),O=e.mainText||n.selectAll([]),I=e.mainBar||n.selectAll([]),R=e.mainLine||n.selectAll([]),k=e.mainArea||n.selectAll([]),D=e.mainCircle||n.selectAll([]);e.flowing=!0,e.data.targets.forEach(function(t){t.values.splice(0,v)}),s=e.updateXDomain(c,!0,!0),e.updateXGrid&&e.updateXGrid(!0),d.orgDataCount?a=1===d.orgDataCount||(b&&b.x)===(T&&T.x)?e.x(A[0])-e.x(s[0]):e.isTimeSeries()?e.x(A[0])-e.x(s[0]):e.x(b.x)-e.x(T.x):1!==e.data.targets[0].values.length?a=e.x(A[0])-e.x(s[0]):e.isTimeSeries()?(b=e.getValueOnIndex(e.data.targets[0].values,0),T=e.getValueOnIndex(e.data.targets[0].values,e.data.targets[0].values.length-1),a=e.x(b.x)-e.x(T.x)):a=m(s)/2,S=m(A)/m(s),o="translate("+a+",0) scale("+S+",1)",e.hideXGridFocus(),n.transition().ease("linear").duration(P).each(function(){L.add(e.axes.x.transition().call(e.xAxis)),L.add(I.transition().attr("transform",o)),L.add(R.transition().attr("transform",o)),L.add(k.transition().attr("transform",o)),L.add(D.transition().attr("transform",o)),L.add(O.transition().attr("transform",o)),L.add(E.filter(e.isRegionOnX).transition().attr("transform",o)),L.add(V.transition().attr("transform",o)),L.add(G.transition().attr("transform",o))}).call(L,function(){var t,n=[],a=[],o=[];if(v){for(t=0;t=0&&(e=!0)}),!e)}),o.regions},P.selected=function(t){var e=this.internal,i=e.d3;return i.merge(e.main.selectAll("."+r.shapes+e.getTargetSelectorSuffix(t)).selectAll("."+r.shape).filter(function(){return i.select(this).classed(r.SELECTED)}).map(function(t){return t.map(function(t){var e=t.__data__;return e.data?e.data:e})}))},P.select=function(t,e,i){var 
n=this.internal,a=n.d3,o=n.config;o.data_selection_enabled&&n.main.selectAll("."+r.shapes).selectAll("."+r.shape).each(function(s,c){var d=a.select(this),l=s.data?s.data.id:s.id,u=n.getToggle(this,s).bind(n),h=o.data_selection_grouped||!t||t.indexOf(l)>=0,g=!e||e.indexOf(c)>=0,f=d.classed(r.SELECTED);d.classed(r.line)||d.classed(r.area)||(h&&g?o.data_selection_isselectable(s)&&!f&&u(!0,d.classed(r.SELECTED,!0),s,c):void 0!==i&&i&&f&&u(!1,d.classed(r.SELECTED,!1),s,c))})},P.unselect=function(t,e){var i=this.internal,n=i.d3,a=i.config;a.data_selection_enabled&&i.main.selectAll("."+r.shapes).selectAll("."+r.shape).each(function(o,s){var c=n.select(this),d=o.data?o.data.id:o.id,l=i.getToggle(this,o).bind(i),u=a.data_selection_grouped||!t||t.indexOf(d)>=0,h=!e||e.indexOf(s)>=0,g=c.classed(r.SELECTED);c.classed(r.line)||c.classed(r.area)||u&&h&&a.data_selection_isselectable(o)&&g&&l(!1,c.classed(r.SELECTED,!1),o,s)})},P.show=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.removeHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",1,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",1)}),e.withLegend&&n.showLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},P.hide=function(t,e){var i,n=this.internal;t=n.mapToTargetIds(t),e=e||{},n.addHiddenTargetIds(t),(i=n.svg.selectAll(n.selectorTargets(t))).transition().style("opacity",0,"important").call(n.endall,function(){i.style("opacity",null).style("opacity",0)}),e.withLegend&&n.hideLegend(t),n.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0,withLegend:!0})},P.toggle=function(t,e){var i=this,n=this.internal;n.mapToTargetIds(t).forEach(function(t){n.isTargetToShow(t)?i.hide(t,e):i.show(t,e)})},P.tooltip=function(){},P.tooltip.show=function(t){var e,i,n=this.internal;t.mouse&&(i=t.mouse),t.data?n.isMultipleX()?(i=[n.x(t.data.x),n.getYScale(t.data.id)(t.data.value)],e=null):e=l(t.data.index)?t.data.index:n.getIndexByX(t.data.x):void 0!==t.x?e=n.getIndexByX(t.x):void 0!==t.index&&(e=t.index),n.dispatchEvent("mouseover",e,i),n.dispatchEvent("mousemove",e,i),n.config.tooltip_onshow.call(n,t.data)},P.tooltip.hide=function(){this.internal.dispatchEvent("mouseout",0),this.internal.config.tooltip_onhide.call(this)},P.transform=function(t,e){var i=this.internal,n=["pie","donut"].indexOf(t)>=0?{withTransform:!0}:null;i.transformTo(e,t,n)},C.transformTo=function(t,e,i){var n=this,a=!n.hasArcType(),r=i||{withTransitionForAxis:a};r.withTransitionForTransform=!1,n.transiting=!1,n.setTargetType(t,e),n.updateTargets(n.data.targets),n.updateAndRedraw(r)},P.x=function(t){var e=this.internal;return arguments.length&&(e.updateTargetX(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},P.xs=function(t){var e=this.internal;return arguments.length&&(e.updateTargetXs(e.data.targets,t),e.redraw({withUpdateOrgXDomain:!0,withUpdateXDomain:!0})),e.data.xs},P.zoom=function(t){var e=this.internal;return t&&(e.isTimeSeries()&&(t=t.map(function(t){return e.parseDate(t)})),e.brush.extent(t),e.redraw({withUpdateXDomain:!0,withY:e.config.zoom_rescale}),e.config.zoom_onzoom.call(this,e.x.orgDomain())),e.brush.extent()},P.zoom.enable=function(t){var e=this.internal;e.config.zoom_enabled=t,e.updateAndRedraw()},P.unzoom=function(){var t=this.internal;t.brush.clear().update(),t.redraw({withUpdateXDomain:!0})},P.zoom.max=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return 
i.zoom_x_max;i.zoom_x_max=n.max([e.orgXDomain[1],t])},P.zoom.min=function(t){var e=this.internal,i=e.config,n=e.d3;if(0!==t&&!t)return i.zoom_x_min;i.zoom_x_min=n.min([e.orgXDomain[0],t])},P.zoom.range=function(t){if(!arguments.length)return{max:this.domain.max(),min:this.domain.min()};void 0!==t.max&&this.domain.max(t.max),void 0!==t.min&&this.domain.min(t.min)},C.initPie=function(){var t=this,e=t.d3;t.pie=e.layout.pie().value(function(t){return t.values.reduce(function(t,e){return t+e.value},0)}),t.pie.sort(t.getOrderFunction()||null)},C.updateRadius=function(){var t=this,e=t.config,i=e.gauge_width||e.donut_width;t.radiusExpanded=Math.min(t.arcWidth,t.arcHeight)/2,t.radius=.95*t.radiusExpanded,t.innerRadiusRatio=i?(t.radius-i)/t.radius:.6,t.innerRadius=t.hasType("donut")||t.hasType("gauge")?t.radius*t.innerRadiusRatio:0},C.updateArc=function(){var t=this;t.svgArc=t.getSvgArc(),t.svgArcExpanded=t.getSvgArcExpanded(),t.svgArcExpandedSub=t.getSvgArcExpanded(.98)},C.updateAngle=function(t){var e,i,n,a,r=this,o=r.config,s=!1,c=0;return o?(r.pie(r.filterTargetsToShow(r.data.targets)).forEach(function(e){s||e.data.id!==t.data.id||(s=!0,(t=e).index=c),c++}),isNaN(t.startAngle)&&(t.startAngle=0),isNaN(t.endAngle)&&(t.endAngle=t.startAngle),r.isGaugeType(t.data)&&(e=o.gauge_min,i=o.gauge_max,n=Math.PI*(o.gauge_fullCircle?2:1)/(i-e),a=t.value.375?1.175-36/o.radius:.8)*o.radius/a:0)+","+n*r+")"),d},C.getArcRatio=function(t){var e=this,i=e.config,n=Math.PI*(e.hasType("gauge")&&!i.gauge_fullCircle?1:2);return t?(t.endAngle-t.startAngle)/n:null},C.convertToArcData=function(t){return this.addName({id:t.data.id,value:t.value,ratio:this.getArcRatio(t),index:t.index})},C.textForArcLabel=function(t){var e,i,n,a,r,o=this;return o.shouldShowArcLabel()?(e=o.updateAngle(t),i=e?e.value:null,n=o.getArcRatio(e),a=t.data.id,o.hasType("gauge")||o.meetsArcLabelThreshold(n)?(r=o.getArcLabelFormat(),r?r(i,n,a):o.defaultArcValueFormat(i,n)):""):""},C.textForGaugeMinMax=function(t,e){var i=this.getGaugeLabelExtents();return i?i(t,e):t},C.expandArc=function(t){var e,i=this;i.transiting?e=window.setInterval(function(){i.transiting||(window.clearInterval(e),i.legend.selectAll(".c3-legend-item-focused").size()>0&&i.expandArc(t))},10):(t=i.mapToTargetIds(t),i.svg.selectAll(i.selectorTargets(t,"."+r.chartArc)).each(function(t){i.shouldExpand(t.data.id)&&i.d3.select(this).selectAll("path").transition().duration(i.expandDuration(t.data.id)).attr("d",i.svgArcExpanded).transition().duration(2*i.expandDuration(t.data.id)).attr("d",i.svgArcExpandedSub).each(function(t){i.isDonutType(t.data)})}))},C.unexpandArc=function(t){var e=this;e.transiting||(t=e.mapToTargetIds(t),e.svg.selectAll(e.selectorTargets(t,"."+r.chartArc)).selectAll("path").transition().duration(function(t){return e.expandDuration(t.data.id)}).attr("d",e.svgArc),e.svg.selectAll("."+r.arc))},C.expandDuration=function(t){var e=this,i=e.config;return e.isDonutType(t)?i.donut_expand_duration:e.isGaugeType(t)?i.gauge_expand_duration:e.isPieType(t)?i.pie_expand_duration:50},C.shouldExpand=function(t){var e=this,i=e.config;return e.isDonutType(t)&&i.donut_expand||e.isGaugeType(t)&&i.gauge_expand||e.isPieType(t)&&i.pie_expand},C.shouldShowArcLabel=function(){var t=this,e=t.config,i=!0;return t.hasType("donut")?i=e.donut_label_show:t.hasType("pie")&&(i=e.pie_label_show),i},C.meetsArcLabelThreshold=function(t){var e=this,i=e.config;return t>=(e.hasType("donut")?i.donut_label_threshold:i.pie_label_threshold)},C.getArcLabelFormat=function(){var 
t=this,e=t.config,i=e.pie_label_format;return t.hasType("gauge")?i=e.gauge_label_format:t.hasType("donut")&&(i=e.donut_label_format),i},C.getGaugeLabelExtents=function(){return this.config.gauge_label_extents},C.getArcTitle=function(){var t=this;return t.hasType("donut")?t.config.donut_title:""},C.updateTargetsForArc=function(t){var e,i=this,n=i.main,a=i.classChartArc.bind(i),o=i.classArcs.bind(i),s=i.classFocus.bind(i);(e=n.select("."+r.chartArcs).selectAll("."+r.chartArc).data(i.pie(t)).attr("class",function(t){return a(t)+s(t.data)}).enter().append("g").attr("class",a)).append("g").attr("class",o),e.append("text").attr("dy",i.hasType("gauge")?"-.1em":".35em").style("opacity",0).style("text-anchor","middle").style("pointer-events","none")},C.initArc=function(){var t=this;t.arcs=t.main.select("."+r.chart).append("g").attr("class",r.chartArcs).attr("transform",t.getTranslate("arc")),t.arcs.append("text").attr("class",r.chartArcsTitle).style("text-anchor","middle").text(t.getArcTitle())},C.redrawArc=function(t,e,i){var n,a=this,o=a.d3,s=a.config,c=a.main;(n=c.selectAll("."+r.arcs).selectAll("."+r.arc).data(a.arcData.bind(a))).enter().append("path").attr("class",a.classArc.bind(a)).style("fill",function(t){return a.color(t.data)}).style("cursor",function(t){return s.interaction_enabled&&s.data_selection_isselectable(t)?"pointer":null}).each(function(t){a.isGaugeType(t.data)&&(t.startAngle=t.endAngle=s.gauge_startingAngle),this._current=t}),n.attr("transform",function(t){return!a.isGaugeType(t.data)&&i?"scale(0)":""}).on("mouseover",s.interaction_enabled?function(t){var e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.expandArc(e.data.id),a.api.focus(e.data.id),a.toggleFocusLegend(e.data.id,!0),a.config.data_onmouseover(i,this))}:null).on("mousemove",s.interaction_enabled?function(t){var e,i=a.updateAngle(t);i&&(e=[a.convertToArcData(i)],a.showTooltip(e,this))}:null).on("mouseout",s.interaction_enabled?function(t){var e,i;a.transiting||(e=a.updateAngle(t))&&(i=a.convertToArcData(e),a.unexpandArc(e.data.id),a.api.revert(),a.revertLegend(),a.hideTooltip(),a.config.data_onmouseout(i,this))}:null).on("click",s.interaction_enabled?function(t,e){var i,n=a.updateAngle(t);n&&(i=a.convertToArcData(n),a.toggleShape&&a.toggleShape(this,i,e),a.config.data_onclick.call(a.api,i,this))}:null).each(function(){a.transiting=!0}).transition().duration(t).attrTween("d",function(t){var e,i=a.updateAngle(t);return i?(isNaN(this._current.startAngle)&&(this._current.startAngle=0),isNaN(this._current.endAngle)&&(this._current.endAngle=this._current.startAngle),e=o.interpolate(this._current,i),this._current=e(0),function(i){var n=e(i);return n.data=t.data,a.getArc(n,!0)}):function(){return"M 0 0"}}).attr("transform",i?"scale(1)":"").style("fill",function(t){return a.levelColor?a.levelColor(t.data.values[0].value):a.color(t.data.id)}).call(a.endall,function(){a.transiting=!1}),n.exit().transition().duration(e).style("opacity",0).remove(),c.selectAll("."+r.chartArc).select("text").style("opacity",0).attr("class",function(t){return a.isGaugeType(t.data)?r.gaugeValue:""}).text(a.textForArcLabel.bind(a)).attr("transform",a.transformForArcLabel.bind(a)).style("font-size",function(t){return a.isGaugeType(t.data)?Math.round(a.radius/5)+"px":""}).transition().duration(t).style("opacity",function(t){return 
a.isTargetToShow(t.data.id)&&a.isArcType(t.data)?1:0}),c.select("."+r.chartArcsTitle).style("opacity",a.hasType("donut")||a.hasType("gauge")?1:0),a.hasType("gauge")&&(a.arcs.select("."+r.chartArcsBackground).attr("d",function(){var t={data:[{value:s.gauge_max}],startAngle:s.gauge_startingAngle,endAngle:-1*s.gauge_startingAngle};return a.getArc(t,!0,!0)}),a.arcs.select("."+r.chartArcsGaugeUnit).attr("dy",".75em").text(s.gauge_label_show?s.gauge_units:""),a.arcs.select("."+r.chartArcsGaugeMin).attr("dx",-1*(a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2))+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_min,!1):""),a.arcs.select("."+r.chartArcsGaugeMax).attr("dx",a.innerRadius+(a.radius-a.innerRadius)/(s.gauge_fullCircle?1:2)+"px").attr("dy","1.2em").text(s.gauge_label_show?a.textForGaugeMinMax(s.gauge_max,!0):""))},C.initGauge=function(){var t=this.arcs;this.hasType("gauge")&&(t.append("path").attr("class",r.chartArcsBackground),t.append("text").attr("class",r.chartArcsGaugeUnit).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",r.chartArcsGaugeMin).style("text-anchor","middle").style("pointer-events","none"),t.append("text").attr("class",r.chartArcsGaugeMax).style("text-anchor","middle").style("pointer-events","none"))},C.getGaugeLabelHeight=function(){return this.config.gauge_label_show?20:0},C.hasCaches=function(t){for(var e=0;e=0?r.focused:"")},C.classDefocused=function(t){return" "+(this.defocusedTargetIds.indexOf(t.id)>=0?r.defocused:"")},C.classChartText=function(t){return r.chartText+this.classTarget(t.id)},C.classChartLine=function(t){return r.chartLine+this.classTarget(t.id)},C.classChartBar=function(t){return r.chartBar+this.classTarget(t.id)},C.classChartArc=function(t){return r.chartArc+this.classTarget(t.data.id)},C.getTargetSelectorSuffix=function(t){return t||0===t?("-"+t).replace(/[\s?!@#$%^&*()_=+,.<>'":;\[\]\/|~`{}\\]/g,"-"):""},C.selectorTarget=function(t,e){return(e||"")+"."+r.target+this.getTargetSelectorSuffix(t)},C.selectorTargets=function(t,e){var i=this;return t=t||[],t.length?t.map(function(t){return i.selectorTarget(t,e)}):null},C.selectorLegend=function(t){return"."+r.legendItem+this.getTargetSelectorSuffix(t)},C.selectorLegends=function(t){var e=this;return t&&t.length?t.map(function(t){return e.selectorLegend(t)}):null},C.getClipPath=function(t){return"url("+(window.navigator.appVersion.toLowerCase().indexOf("msie 9.")>=0?"":document.URL.split("#")[0])+"#"+t+")"},C.appendClip=function(t,e){return t.append("clipPath").attr("id",e).append("rect")},C.getAxisClipX=function(t){var e=Math.max(30,this.margin.left);return t?-(1+e):-(e-1)},C.getAxisClipY=function(t){return t?-20:-this.margin.top},C.getXAxisClipX=function(){var t=this;return t.getAxisClipX(!t.config.axis_rotated)},C.getXAxisClipY=function(){var t=this;return t.getAxisClipY(!t.config.axis_rotated)},C.getYAxisClipX=function(){var t=this;return t.config.axis_y_inner?-1:t.getAxisClipX(t.config.axis_rotated)},C.getYAxisClipY=function(){var t=this;return t.getAxisClipY(t.config.axis_rotated)},C.getAxisClipWidth=function(t){var e=this,i=Math.max(30,e.margin.left),n=Math.max(30,e.margin.right);return t?e.width+2+i+n:e.margin.left+20},C.getAxisClipHeight=function(t){return(t?this.margin.bottom:this.margin.top+this.height)+20},C.getXAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(!t.config.axis_rotated)},C.getXAxisClipHeight=function(){var t=this;return 
t.getAxisClipHeight(!t.config.axis_rotated)},C.getYAxisClipWidth=function(){var t=this;return t.getAxisClipWidth(t.config.axis_rotated)+(t.config.axis_y_inner?20:0)},C.getYAxisClipHeight=function(){var t=this;return t.getAxisClipHeight(t.config.axis_rotated)},C.generateColor=function(){var t=this,e=t.config,i=t.d3,n=e.data_colors,a=S(e.color_pattern)?e.color_pattern:i.scale.category10().range(),r=e.data_color,o=[];return function(t){var e,i=t.id||t.data&&t.data.id||t;return n[i]instanceof Function?e=n[i](t):n[i]?e=n[i]:(o.indexOf(i)<0&&o.push(i),e=a[o.indexOf(i)%a.length],n[i]=e),r instanceof Function?r(e,t):e}},C.generateLevelColor=function(){var t=this.config,e=t.color_pattern,i=t.color_threshold,n="value"===i.unit,a=i.values&&i.values.length?i.values:[],r=i.max||100;return S(t.color_threshold)?function(t){var i,o=e[e.length-1];for(i=0;i=0?n.data.xs[i]=(e&&n.data.xs[i]?n.data.xs[i]:[]).concat(t.map(function(t){return t[r]}).filter(l).map(function(t,e){return n.generateTargetX(t,i,e)})):a.data_x?n.data.xs[i]=n.getOtherTargetXs():S(a.data_xs)&&(n.data.xs[i]=n.getXValuesOfXKey(r,n.data.targets)):n.data.xs[i]=t.map(function(t,e){return e})}),r.forEach(function(t){if(!n.data.xs[t])throw new Error('x is not defined for id = "'+t+'".')}),(i=r.map(function(e,i){var r=a.data_idConverter(e);return{id:r,id_org:e,values:t.map(function(t,o){var s,c=t[n.getXKey(e)],d=null===t[e]||isNaN(t[e])?null:+t[e];return n.isCustomX()&&n.isCategorized()&&void 0!==c?(0===i&&0===o&&(a.axis_x_categories=[]),-1===(s=a.axis_x_categories.indexOf(c))&&(s=a.axis_x_categories.length,a.axis_x_categories.push(c))):s=n.generateTargetX(c,e,o),(void 0===t[e]||n.data.xs[e].length<=o)&&(s=void 0),{x:s,value:d,id:r}}).filter(function(t){return p(t.x)})}})).forEach(function(t){var e;a.data_xSort&&(t.values=t.values.sort(function(t,e){return(t.x||0===t.x?t.x:1/0)-(e.x||0===e.x?e.x:1/0)})),e=0,t.values.forEach(function(t){t.index=e++}),n.data.xs[t.id].sort(function(t,e){return t-e})}),n.hasNegativeValue=n.hasNegativeValueInTargets(i),n.hasPositiveValue=n.hasPositiveValueInTargets(i),a.data_type&&n.setTargetType(n.mapToIds(i).filter(function(t){return!(t in a.data_types)}),a.data_type),i.forEach(function(t){n.addCache(t.id_org,t)}),i},C.isX=function(t){var e=this.config;return e.data_x&&t===e.data_x||S(e.data_xs)&&v(e.data_xs,t)},C.isNotX=function(t){return!this.isX(t)},C.getXKey=function(t){var e=this.config;return e.data_x?e.data_x:S(e.data_xs)?e.data_xs[t]:null},C.getXValuesOfXKey=function(t,e){var i,n=this;return(e&&S(e)?n.mapToIds(e):[]).forEach(function(e){n.getXKey(e)===t&&(i=n.data.xs[e])}),i},C.getIndexByX=function(t){var e=this,i=e.filterByX(e.data.targets,t);return i.length?i[0].index:null},C.getXValue=function(t,e){var i=this;return t in i.data.xs&&i.data.xs[t]&&l(i.data.xs[t][e])?i.data.xs[t][e]:e},C.getOtherTargetXs=function(){var t=this,e=Object.keys(t.data.xs);return e.length?t.data.xs[e[0]]:null},C.getOtherTargetX=function(t){var e=this.getOtherTargetXs();return e&&t1},C.isMultipleX=function(){return S(this.config.data_xs)||!this.config.data_xSort||this.hasType("scatter")},C.addName=function(t){var e,i=this;return t&&(e=i.config.data_names[t.id],t.name=void 0!==e?e:t.id),t},C.getValueOnIndex=function(t,e){var i=t.filter(function(t){return t.index===e});return i.length?i[0]:null},C.updateTargetX=function(t,e){var i=this;t.forEach(function(t){t.values.forEach(function(n,a){n.x=i.generateTargetX(e[a],t.id,a)}),i.data.xs[t.id]=e})},C.updateTargetXs=function(t,e){var 
i=this;t.forEach(function(t){e[t.id]&&i.updateTargetX([t],e[t.id])})},C.generateTargetX=function(t,e,i){var n=this;return n.isTimeSeries()?t?n.parseDate(t):n.parseDate(n.getXValue(e,i)):n.isCustomX()&&!n.isCategorized()?l(t)?+t:n.getXValue(e,i):i},C.cloneTarget=function(t){return{id:t.id,id_org:t.id_org,values:t.values.map(function(t){return{x:t.x,value:t.value,id:t.id}})}},C.updateXs=function(){var t=this;t.data.targets.length&&(t.xs=[],t.data.targets[0].values.forEach(function(e){t.xs[e.index]=e.x}))},C.getPrevX=function(t){var e=this.xs[t-1];return void 0!==e?e:null},C.getNextX=function(t){var e=this.xs[t+1];return void 0!==e?e:null},C.getMaxDataCount=function(){var t=this;return t.d3.max(t.data.targets,function(t){return t.values.length})},C.getMaxDataCountTarget=function(t){var e,i=t.length,n=0;return i>1?t.forEach(function(t){t.values.length>n&&(e=t,n=t.values.length)}):e=i?t[0]:null,e},C.getEdgeX=function(t){var e=this;return t.length?[e.d3.min(t,function(t){return t.values[0].x}),e.d3.max(t,function(t){return t.values[t.values.length-1].x})]:[0,0]},C.mapToIds=function(t){return t.map(function(t){return t.id})},C.mapToTargetIds=function(t){var e=this;return t?[].concat(t):e.mapToIds(e.data.targets)},C.hasTarget=function(t,e){var i,n=this.mapToIds(t);for(i=0;ie?1:t>=e?0:NaN})},C.addHiddenTargetIds=function(t){t=t instanceof Array?t:new Array(t);for(var e=0;e0})},C.isOrderDesc=function(){var t=this.config;return"string"==typeof t.data_order&&"desc"===t.data_order.toLowerCase()},C.isOrderAsc=function(){var t=this.config;return"string"==typeof t.data_order&&"asc"===t.data_order.toLowerCase()},C.getOrderFunction=function(){var t=this,e=t.config,i=t.isOrderAsc(),n=t.isOrderDesc();if(i||n)return function(t,e){var i=function(t,e){return t+Math.abs(e.value)},a=t.values.reduce(i,0),r=e.values.reduce(i,0);return n?r-a:a-r};if(u(e.data_order))return e.data_order;if(h(e.data_order)){var a=e.data_order;return function(t,e){return a.indexOf(t.id)-a.indexOf(e.id)}}},C.orderTargets=function(t){var e=this.getOrderFunction();return e&&(t.sort(e),(this.isOrderAsc()||this.isOrderDesc())&&t.reverse()),t},C.filterByX=function(t,e){return this.d3.merge(t.map(function(t){return t.values})).filter(function(t){return t.x-e==0})},C.filterRemoveNull=function(t){return t.filter(function(t){return l(t.value)})},C.filterByXDomain=function(t,e){return t.map(function(t){return{id:t.id,id_org:t.id_org,values:t.values.filter(function(t){return e[0]<=t.x&&t.x<=e[1]})}})},C.hasDataLabel=function(){var t=this.config;return!("boolean"!=typeof t.data_labels||!t.data_labels)||!("object"!==o(t.data_labels)||!S(t.data_labels))},C.getDataLabelLength=function(t,e,i){var n=this,a=[0,0];return n.selectChart.select("svg").selectAll(".dummy").data([t,e]).enter().append("text").text(function(t){return n.dataLabelFormat(t.id)(t)}).each(function(t,e){a[e]=1.3*this.getBoundingClientRect()[i]}).remove(),a},C.isNoneArc=function(t){return this.hasTarget(this.data.targets,t.id)},C.isArc=function(t){return"data"in t&&this.hasTarget(this.data.targets,t.data.id)},C.findSameXOfValues=function(t,e){var i,n=t[e].x,a=[];for(i=e-1;i>=0&&n===t[i].x;i--)a.push(t[i]);for(i=e;i0)for(o=s.hasNegativeValueInTargets(t),e=0;e=0})).length)for(n=a[0],o&&l[n]&&l[n].forEach(function(t,e){l[n][e]=t<0?t:0}),i=1;i0||(l[n][e]+=+t)});return s.d3.min(Object.keys(l).map(function(t){return s.d3.min(l[t])}))},C.getYDomainMax=function(t){var 
e,i,n,a,r,o,s=this,c=s.config,d=s.mapToIds(t),l=s.getValuesAsIdKeyed(t);if(c.data_groups.length>0)for(o=s.hasPositiveValueInTargets(t),e=0;e=0})).length)for(n=a[0],o&&l[n]&&l[n].forEach(function(t,e){l[n][e]=t>0?t:0}),i=1;i=0&&b>=0,g=v<=0&&b<=0,(l(y)&&h||l(w)&&g)&&(A=!1),A&&(h&&(v=0),g&&(b=0)),a=Math.abs(b-v),r=o=.1*a,void 0!==T&&(b=T+(s=Math.max(Math.abs(v),Math.abs(b))),v=T-s),C?(c=f.getDataLabelLength(v,b,"width"),d=m(f.y.range()),r+=a*((u=[c[0]/d,c[1]/d])[1]/(1-u[0]-u[1])),o+=a*(u[0]/(1-u[0]-u[1]))):L&&(c=f.getDataLabelLength(v,b,"height"),r+=f.axis.convertPixelsToAxisPadding(c[1],a),o+=f.axis.convertPixelsToAxisPadding(c[0],a)),"y"===e&&S(p.axis_y_padding)&&(r=f.axis.getPadding(p.axis_y_padding,"top",r,a),o=f.axis.getPadding(p.axis_y_padding,"bottom",o,a)),"y2"===e&&S(p.axis_y2_padding)&&(r=f.axis.getPadding(p.axis_y2_padding,"top",r,a),o=f.axis.getPadding(p.axis_y2_padding,"bottom",o,a)),A&&(h&&(o=v),g&&(r=-b)),n=[v-o,b+r],P?n.reverse():n)},C.getXDomainMin=function(t){var e=this,i=e.config;return void 0!==i.axis_x_min?e.isTimeSeries()?this.parseDate(i.axis_x_min):i.axis_x_min:e.d3.min(t,function(t){return e.d3.min(t.values,function(t){return t.x})})},C.getXDomainMax=function(t){var e=this,i=e.config;return void 0!==i.axis_x_max?e.isTimeSeries()?this.parseDate(i.axis_x_max):i.axis_x_max:e.d3.max(t,function(t){return e.d3.max(t.values,function(t){return t.x})})},C.getXDomainPadding=function(t){var e,i,n,a,r=this,s=r.config,c=t[1]-t[0];return i=r.isCategorized()?0:r.hasType("bar")?(e=r.getMaxDataCount())>1?c/(e-1)/2:.5:.01*c,"object"===o(s.axis_x_padding)&&S(s.axis_x_padding)?(n=l(s.axis_x_padding.left)?s.axis_x_padding.left:i,a=l(s.axis_x_padding.right)?s.axis_x_padding.right:i):n=a="number"==typeof s.axis_x_padding?s.axis_x_padding:i,{left:n,right:a}},C.getXDomain=function(t){var e=this,i=[e.getXDomainMin(t),e.getXDomainMax(t)],n=i[0],a=i[1],r=e.getXDomainPadding(i),o=0,s=0;return n-a!=0||e.isCategorized()||(e.isTimeSeries()?(n=new Date(.5*n.getTime()),a=new Date(1.5*a.getTime())):(n=0===n?1:.5*n,a=0===a?-1:1.5*a)),(n||0===n)&&(o=e.isTimeSeries()?new Date(n.getTime()-r.left):n-r.left),(a||0===a)&&(s=e.isTimeSeries()?new Date(a.getTime()+r.right):a+r.right),[o,s]},C.updateXDomain=function(t,e,i,n,a){var r=this,o=r.config;return i&&(r.x.domain(a||r.d3.extent(r.getXDomain(t))),r.orgXDomain=r.x.domain(),o.zoom_enabled&&r.zoom.scale(r.x).updateScaleExtent(),r.subX.domain(r.x.domain()),r.brush&&r.brush.scale(r.subX)),e&&(r.x.domain(a||(!r.brush||r.brush.empty()?r.orgXDomain:r.brush.extent())),o.zoom_enabled&&r.zoom.scale(r.x).updateScaleExtent()),n&&r.x.domain(r.trimXDomain(r.x.orgDomain())),r.x.domain()},C.trimXDomain=function(t){var e=this.getZoomDomain(),i=e[0],n=e[1];return t[0]<=i&&(t[1]=+t[1]+(i-t[0]),t[0]=i),n<=t[1]&&(t[0]=+t[0]-(t[1]-n),t[1]=n),t},C.drag=function(t){var e,i,n,a,o,s,c,d,l=this,u=l.config,h=l.main,g=l.d3;l.hasArcType()||u.data_selection_enabled&&(u.zoom_enabled&&!l.zoom.altDomain||u.data_selection_multiple&&(e=l.dragStart[0],i=l.dragStart[1],n=t[0],a=t[1],o=Math.min(e,n),s=Math.max(e,n),c=u.data_selection_grouped?l.margin.top:Math.min(i,a),d=u.data_selection_grouped?l.height:Math.max(i,a),h.select("."+r.dragarea).attr("x",o).attr("y",c).attr("width",s-o).attr("height",d-c),h.selectAll("."+r.shapes).selectAll("."+r.shape).filter(function(t){return u.data_selection_isselectable(t)}).each(function(t,e){var 
i,n,a,u,h,f,p=g.select(this),_=p.classed(r.SELECTED),x=p.classed(r.INCLUDED),m=!1;if(p.classed(r.circle))i=1*p.attr("cx"),n=1*p.attr("cy"),h=l.togglePoint,m=od&&(c=c.filter(function(t){return(""+t).indexOf(".")<0}));return c},C.getGridFilterToRemove=function(t){return t?function(e){var i=!1;return[].concat(t).forEach(function(t){("value"in t&&e.value===t.value||"class"in t&&e.class===t.class)&&(i=!0)}),i}:function(){return!0}},C.removeGridLines=function(t,e){var i=this,n=i.config,a=i.getGridFilterToRemove(t),o=function(t){return!a(t)},s=e?r.xgridLines:r.ygridLines,c=e?r.xgridLine:r.ygridLine;i.main.select("."+s).selectAll("."+c).filter(a).transition().duration(n.transition_duration).style("opacity",0).remove(),e?n.grid_x_lines=n.grid_x_lines.filter(o):n.grid_y_lines=n.grid_y_lines.filter(o)},C.initEventRect=function(){this.main.select("."+r.chart).append("g").attr("class",r.eventRects).style("fill-opacity",0)},C.redrawEventRect=function(){var t,e,i=this,n=i.config,a=i.isMultipleX(),o=i.main.select("."+r.eventRects).style("cursor",n.zoom_enabled?n.axis_rotated?"ns-resize":"ew-resize":null).classed(r.eventRectsMultiple,a).classed(r.eventRectsSingle,!a);o.selectAll("."+r.eventRect).remove(),i.eventRect=o.selectAll("."+r.eventRect),a?(t=i.eventRect.data([0]),i.generateEventRectsForMultipleXs(t.enter()),i.updateEventRect(t)):(e=i.getMaxDataCountTarget(i.data.targets),o.datum(e?e.values:[]),i.eventRect=o.selectAll("."+r.eventRect),t=i.eventRect.data(function(t){return t}),i.generateEventRectsForSingleX(t.enter()),i.updateEventRect(t),t.exit().remove())},C.updateEventRect=function(t){var e,i,n,a,r,o,s=this,c=s.config;t=t||s.eventRect.data(function(t){return t}),s.isMultipleX()?(e=0,i=0,n=s.width,a=s.height):(!s.isCustomX()&&!s.isTimeSeries()||s.isCategorized()?(r=s.getEventRectWidth(),o=function(t){return s.x(t.x)-r/2}):(s.updateXs(),r=function(t){var e=s.getPrevX(t.index),i=s.getNextX(t.index);return null===e&&null===i?c.axis_rotated?s.height:s.width:(null===e&&(e=s.x.domain()[0]),null===i&&(i=s.x.domain()[1]),Math.max(0,(s.x(i)-s.x(e))/2))},o=function(t){var e=s.getPrevX(t.index),i=s.getNextX(t.index),n=s.data.xs[t.id][t.index];return null===e&&null===i?0:(null===e&&(e=s.x.domain()[0]),(s.x(n)+s.x(e))/2)}),e=c.axis_rotated?0:o,i=c.axis_rotated?o:0,n=c.axis_rotated?s.width:r,a=c.axis_rotated?r:s.height),t.attr("class",s.classEvent.bind(s)).attr("x",e).attr("y",i).attr("width",n).attr("height",a)},C.generateEventRectsForSingleX=function(t){var e=this,i=e.d3,n=e.config;t.append("rect").attr("class",e.classEvent.bind(e)).style("cursor",n.data_selection_enabled&&n.data_selection_grouped?"pointer":null).on("mouseover",function(t){var i=t.index;e.dragging||e.flowing||e.hasArcType()||(n.point_focus_expand_enabled&&e.expandCircles(i,null,!0),e.expandBars(i,null,!0),e.main.selectAll("."+r.shape+"-"+i).each(function(t){n.data_onmouseover.call(e.api,t)}))}).on("mouseout",function(t){var i=t.index;e.config&&(e.hasArcType()||(e.hideXGridFocus(),e.hideTooltip(),e.unexpandCircles(),e.unexpandBars(),e.main.selectAll("."+r.shape+"-"+i).each(function(t){n.data_onmouseout.call(e.api,t)})))}).on("mousemove",function(t){var a,o=t.index,s=e.svg.select("."+r.eventRect+"-"+o);e.dragging||e.flowing||e.hasArcType()||(e.isStepType(t)&&"step-after"===e.config.line_step_type&&i.mouse(this)[0]=0}).classed(r.legendItemFocused,e).transition().duration(100).style("opacity",function(){return(e?i.opacityForLegend:i.opacityForUnfocusedLegend).call(i,i.d3.select(this))})},C.revertLegend=function(){var 
t=this,e=t.d3;t.legend.selectAll("."+r.legendItem).classed(r.legendItemFocused,!1).transition().duration(100).style("opacity",function(){return t.opacityForLegend(e.select(this))})},C.showLegend=function(t){var e=this,i=e.config;i.legend_show||(i.legend_show=!0,e.legend.style("visibility","visible"),e.legendHasRendered||e.updateLegendWithDefaults()),e.removeHiddenLegendIds(t),e.legend.selectAll(e.selectorLegends(t)).style("visibility","visible").transition().style("opacity",function(){return e.opacityForLegend(e.d3.select(this))})},C.hideLegend=function(t){var e=this,i=e.config;i.legend_show&&y(t)&&(i.legend_show=!1,e.legend.style("visibility","hidden")),e.addHiddenLegendIds(t),e.legend.selectAll(e.selectorLegends(t)).style("opacity",0).style("visibility","hidden")},C.clearLegendItemTextBoxCache=function(){this.legendItemTextBox={}},C.updateLegend=function(t,e,i){function n(t,e){return b.legendItemTextBox[e]||(b.legendItemTextBox[e]=b.getTextRect(t.textContent,r.legendItem,t)),b.legendItemTextBox[e]}function a(e,i,a){function r(t,e){e||(o=(f-E-g)/2)=C)&&(C=u),(!L||h>=L)&&(L=h),s=b.isLegendRight||b.isLegendInset?L:C,T.legend_equally?(Object.keys(I).forEach(function(t){I[t]=C}),Object.keys(R).forEach(function(t){R[t]=L}),(o=(f-s*t.length)/2)0&&0===v.size()&&(v=b.legend.insert("g","."+r.legendItem).attr("class",r.legendBackground).append("rect")),m=b.legend.selectAll("text").data(t).text(function(t){return void 0!==T.data_names[t]?T.data_names[t]:t}).each(function(t,e){a(this,t,e)}),(_?m.transition():m).attr("x",s).attr("y",l),y=b.legend.selectAll("rect."+r.legendItemEvent).data(t),(_?y.transition():y).attr("width",function(t){return I[t]}).attr("height",function(t){return R[t]}).attr("x",c).attr("y",u),S=b.legend.selectAll("line."+r.legendItemTile).data(t),(_?S.transition():S).style("stroke",b.color).attr("x1",h).attr("y1",f).attr("x2",g).attr("y2",f),v&&(_?v.transition():v).attr("height",b.getLegendHeight()-12).attr("width",C*(F+1)+10),b.legend.selectAll("."+r.legendItem).classed(r.legendItemHidden,function(t){return!b.isTargetToShow(t)}),b.updateLegendItemWidth(C),b.updateLegendItemHeight(L),b.updateLegendStep(F),b.updateSizes(),b.updateScales(),b.updateSvgSize(),b.transformAll(x,i),b.legendHasRendered=!0},C.initRegion=function(){var t=this;t.region=t.main.append("g").attr("clip-path",t.clipPath).attr("class",r.regions)},C.updateRegion=function(t){var e=this,i=e.config;e.region.style("visibility",e.hasArcType()?"hidden":"visible"),e.mainRegion=e.main.select("."+r.regions).selectAll("."+r.region).data(i.regions),e.mainRegion.enter().append("g").append("rect").style("fill-opacity",0),e.mainRegion.attr("class",e.classRegion.bind(e)),e.mainRegion.exit().transition().duration(t).style("opacity",0).remove()},C.redrawRegion=function(t){var e=this,i=e.mainRegion.selectAll("rect").each(function(){var t=e.d3.select(this.parentNode).datum();e.d3.select(this).datum(t)}),n=e.regionX.bind(e),a=e.regionY.bind(e),r=e.regionWidth.bind(e),o=e.regionHeight.bind(e);return[(t?i.transition():i).attr("x",n).attr("y",a).attr("width",r).attr("height",o).style("fill-opacity",function(t){return l(t.opacity)?t.opacity:.1})]},C.regionX=function(t){var e=this,i=e.config,n="y"===t.axis?e.y:e.y2;return"y"===t.axis||"y2"===t.axis?i.axis_rotated&&"start"in t?n(t.start):0:i.axis_rotated?0:"start"in t?e.x(e.isTimeSeries()?e.parseDate(t.start):t.start):0},C.regionY=function(t){var e=this,i=e.config,n="y"===t.axis?e.y:e.y2;return"y"===t.axis||"y2"===t.axis?i.axis_rotated?0:"end"in t?n(t.end):0:i.axis_rotated&&"start"in 
t?e.x(e.isTimeSeries()?e.parseDate(t.start):t.start):0},C.regionWidth=function(t){var e,i=this,n=i.config,a=i.regionX(t),r="y"===t.axis?i.y:i.y2;return e="y"===t.axis||"y2"===t.axis?n.axis_rotated&&"end"in t?r(t.end):i.width:n.axis_rotated?i.width:"end"in t?i.x(i.isTimeSeries()?i.parseDate(t.end):t.end):i.width,ei.bar_width_max?i.bar_width_max:n},C.getBars=function(t,e){var i=this;return(e?i.main.selectAll("."+r.bars+i.getTargetSelectorSuffix(e)):i.main).selectAll("."+r.bar+(l(t)?"-"+t:""))},C.expandBars=function(t,e,i){var n=this;i&&n.unexpandBars(),n.getBars(t,e).classed(r.EXPANDED,!0)},C.unexpandBars=function(t){this.getBars(t).classed(r.EXPANDED,!1)},C.generateDrawBar=function(t,e){var i=this,n=i.config,a=i.generateGetBarPoints(t,e);return function(t,e){var i=a(t,e),r=n.axis_rotated?1:0,o=n.axis_rotated?0:1;return"M "+i[0][r]+","+i[0][o]+" L"+i[1][r]+","+i[1][o]+" L"+i[2][r]+","+i[2][o]+" L"+i[3][r]+","+i[3][o]+" z"}},C.generateGetBarPoints=function(t,e){var i=this,n=e?i.subXAxis:i.xAxis,a=t.__max__+1,r=i.getBarW(n,a),o=i.getShapeX(r,a,t,!!e),s=i.getShapeY(!!e),c=i.getShapeOffset(i.isBarType,t,!!e),d=r*(i.config.bar_space/2),l=e?i.getSubYScale:i.getYScale;return function(t,e){var n=l.call(i,t.id)(0),a=c(t,e)||n,u=o(t),h=s(t);return i.config.axis_rotated&&(0=0&&(d+=s(a[o].value)-c))}),d}},C.isWithinShape=function(t,e){var i,n=this,a=n.d3.select(t);return n.isTargetToShow(e.id)?"circle"===t.nodeName?i=n.isStepType(e)?n.isWithinStep(t,n.getYScale(e.id)(e.value)):n.isWithinCircle(t,1.5*n.pointSelectR(e)):"path"===t.nodeName&&(i=!a.classed(r.bar)||n.isWithinBar(t)):i=!1,i},C.getInterpolate=function(t){var e=this,i=e.isInterpolationType(e.config.spline_interpolation_type)?e.config.spline_interpolation_type:"cardinal";return e.isSplineType(t)?i:e.isStepType(t)?e.config.line_step_type:"linear"},C.initLine=function(){this.main.select("."+r.chart).append("g").attr("class",r.chartLines)},C.updateTargetsForLine=function(t){var e,i=this,n=i.config,a=i.classChartLine.bind(i),o=i.classLines.bind(i),s=i.classAreas.bind(i),c=i.classCircles.bind(i),d=i.classFocus.bind(i);(e=i.main.select("."+r.chartLines).selectAll("."+r.chartLine).data(t).attr("class",function(t){return a(t)+d(t)}).enter().append("g").attr("class",a).style("opacity",0).style("pointer-events","none")).append("g").attr("class",o),e.append("g").attr("class",s),e.append("g").attr("class",function(t){return i.generateClass(r.selectedCircles,t.id)}),e.append("g").attr("class",c).style("cursor",function(t){return n.data_selection_isselectable(t)?"pointer":null}),t.forEach(function(t){i.main.selectAll("."+r.selectedCircles+i.getTargetSelectorSuffix(t.id)).selectAll("."+r.selectedCircle).each(function(e){e.value=t.values[e.index].value})})},C.updateLine=function(t){var e=this;e.mainLine=e.main.selectAll("."+r.lines).selectAll("."+r.line).data(e.lineData.bind(e)),e.mainLine.enter().append("path").attr("class",e.classLine.bind(e)).style("stroke",e.color),e.mainLine.style("opacity",e.initialOpacity.bind(e)).style("shape-rendering",function(t){return e.isStepType(t)?"crispEdges":""}).attr("transform",null),e.mainLine.exit().transition().duration(t).style("opacity",0).remove()},C.redrawLine=function(t,e){return[(e?this.mainLine.transition(Math.random().toString()):this.mainLine).attr("d",t).style("stroke",this.color).style("opacity",1)]},C.generateDrawLine=function(t,e){var i=this,n=i.config,a=i.d3.svg.line(),r=i.generateGetLinePoints(t,e),o=e?i.getSubYScale:i.getYScale,s=function(t){return(e?i.subxx:i.xx).call(i,t)},c=function(t,e){return 
n.data_groups.length>0?r(t,e)[0][1]:o.call(i,t.id)(t.value)};return a=n.axis_rotated?a.x(c).y(s):a.x(s).y(c),n.line_connectNull||(a=a.defined(function(t){return null!=t.value})),function(t){var r,s=n.line_connectNull?i.filterRemoveNull(t.values):t.values,c=e?i.x:i.subX,d=o.call(i,t.id),l=0,u=0;return i.isLineType(t)?n.data_regions[t.id]?r=i.lineWithRegions(s,c,d,n.data_regions[t.id]):(i.isStepType(t)&&(s=i.convertValuesToStep(s)),r=a.interpolate(i.getInterpolate(t))(s)):(s[0]&&(l=c(s[0].x),u=d(s[0].value)),r=n.axis_rotated?"M "+u+" "+l:"M "+l+" "+u),r||"M 0 0"}},C.generateGetLinePoints=function(t,e){var i=this,n=i.config,a=t.__max__+1,r=i.getShapeX(0,a,t,!!e),o=i.getShapeY(!!e),s=i.getShapeOffset(i.isLineType,t,!!e),c=e?i.getSubYScale:i.getYScale;return function(t,e){var a=c.call(i,t.id)(0),d=s(t,e)||a,l=r(t),u=o(t);return n.axis_rotated&&(00?r(t,e)[0][1]:o.call(i,t.id)(i.getAreaBaseValue(t.id))},d=function(t,e){return n.data_groups.length>0?r(t,e)[1][1]:o.call(i,t.id)(t.value)};return a=n.axis_rotated?a.x0(c).x1(d).y(s):a.x(s).y0(n.area_above?0:c).y1(d),n.line_connectNull||(a=a.defined(function(t){return null!==t.value})),function(t){var e,r=n.line_connectNull?i.filterRemoveNull(t.values):t.values,o=0,s=0;return i.isAreaType(t)?(i.isStepType(t)&&(r=i.convertValuesToStep(r)),e=a.interpolate(i.getInterpolate(t))(r)):(r[0]&&(o=i.x(r[0].x),s=i.getYScale(t.id)(r[0].value)),e=n.axis_rotated?"M "+s+" "+o:"M "+o+" "+s),e||"M 0 0"}},C.getAreaBaseValue=function(){return 0},C.generateGetAreaPoints=function(t,e){var i=this,n=i.config,a=t.__max__+1,r=i.getShapeX(0,a,t,!!e),o=i.getShapeY(!!e),s=i.getShapeOffset(i.isAreaType,t,!!e),c=e?i.getSubYScale:i.getYScale;return function(t,e){var a=c.call(i,t.id)(0),d=s(t,e)||a,l=r(t),u=o(t);return n.axis_rotated&&(00?(t=i.getShapeIndices(i.isLineType),e=i.generateGetLinePoints(t),i.circleY=function(t,i){return e(t,i)[0][1]}):i.circleY=function(t){return i.getYScale(t.id)(t.value)}},C.getCircles=function(t,e){var i=this;return(e?i.main.selectAll("."+r.circles+i.getTargetSelectorSuffix(e)):i.main).selectAll("."+r.circle+(l(t)?"-"+t:""))},C.expandCircles=function(t,e,i){var n=this,a=n.pointExpandedR.bind(n);i&&n.unexpandCircles(),n.getCircles(t,e).classed(r.EXPANDED,!0).attr("r",a)},C.unexpandCircles=function(t){var e=this,i=e.pointR.bind(e);e.getCircles(t).filter(function(){return e.d3.select(this).classed(r.EXPANDED)}).classed(r.EXPANDED,!1).attr("r",i)},C.pointR=function(t){var e=this,i=e.config;return e.isStepType(t)?0:u(i.point_r)?i.point_r(t):i.point_r},C.pointExpandedR=function(t){var e=this,i=e.config;return i.point_focus_expand_enabled?u(i.point_focus_expand_r)?i.point_focus_expand_r(t):i.point_focus_expand_r?i.point_focus_expand_r:1.75*e.pointR(t):e.pointR(t)},C.pointSelectR=function(t){var e=this,i=e.config;return u(i.point_select_r)?i.point_select_r(t):i.point_select_r?i.point_select_r:4*e.pointR(t)},C.isWithinCircle=function(t,e){var i=this.d3,n=i.mouse(t),a=i.select(t),r=+a.attr("cx"),o=+a.attr("cy");return Math.sqrt(Math.pow(r-n[0],2)+Math.pow(o-n[1],2))0?i:320/(t.hasType("gauge")&&!e.gauge_fullCircle?2:1)},C.getCurrentPaddingTop=function(){var t=this,e=t.config,i=l(e.padding_top)?e.padding_top:0;return t.title&&t.title.node()&&(i+=t.getTitlePadding()),i},C.getCurrentPaddingBottom=function(){var t=this.config;return l(t.padding_bottom)?t.padding_bottom:0},C.getCurrentPaddingLeft=function(t){var e=this,i=e.config;return 
l(i.padding_left)?i.padding_left:i.axis_rotated?i.axis_x_show?Math.max(_(e.getAxisWidthByAxisId("x",t)),40):1:!i.axis_y_show||i.axis_y_inner?e.axis.getYAxisLabelPosition().isOuter?30:1:_(e.getAxisWidthByAxisId("y",t))},C.getCurrentPaddingRight=function(){var t=this,e=t.config,i=t.isLegendRight?t.getLegendWidth()+20:0;return l(e.padding_right)?e.padding_right+1:e.axis_rotated?10+i:!e.axis_y2_show||e.axis_y2_inner?2+i+(t.axis.getY2AxisLabelPosition().isOuter?20:0):_(t.getAxisWidthByAxisId("y2"))+i},C.getParentRectValue=function(t){for(var e,i=this.selectChart.node();i&&"BODY"!==i.tagName;){try{e=i.getBoundingClientRect()[t]}catch(n){"width"===t&&(e=i.offsetWidth)}if(e)break;i=i.parentNode}return e},C.getParentWidth=function(){return this.getParentRectValue("width")},C.getParentHeight=function(){var t=this.selectChart.style("height");return t.indexOf("px")>0?+t.replace("px",""):0},C.getSvgLeft=function(t){var e=this,i=e.config,n=i.axis_rotated||!i.axis_rotated&&!i.axis_y_inner,a=i.axis_rotated?r.axisX:r.axisY,o=e.main.select("."+a).node(),s=o&&n?o.getBoundingClientRect():{right:0},c=e.selectChart.node().getBoundingClientRect(),d=e.hasArcType(),l=s.right-c.left-(d?0:e.getCurrentPaddingLeft(t));return l>0?l:0},C.getAxisWidthByAxisId=function(t,e){var i=this,n=i.axis.getLabelPositionById(t);return i.axis.getMaxTickWidth(t,e)+(n.isInner?20:40)},C.getHorizontalAxisHeight=function(t){var e=this,i=e.config,n=30;return"x"!==t||i.axis_x_show?"x"===t&&i.axis_x_height?i.axis_x_height:"y"!==t||i.axis_y_show?"y2"!==t||i.axis_y2_show?("x"===t&&!i.axis_rotated&&i.axis_x_tick_rotate&&(n=30+e.axis.getMaxTickWidth(t)*Math.cos(Math.PI*(90-i.axis_x_tick_rotate)/180)),"y"===t&&i.axis_rotated&&i.axis_y_tick_rotate&&(n=30+e.axis.getMaxTickWidth(t)*Math.cos(Math.PI*(90-i.axis_y_tick_rotate)/180)),n+(e.axis.getLabelPositionById(t).isInner?0:10)+("y2"===t?-10:0)):e.rotated_padding_top:!i.legend_show||e.isLegendRight||e.isLegendInset?1:10:8},C.getEventRectWidth=function(){return Math.max(0,this.xAxis.tickInterval())},C.initBrush=function(){var t=this,e=t.d3;t.brush=e.svg.brush().on("brush",function(){t.redrawForBrush()}),t.brush.update=function(){return t.context&&t.context.select("."+r.brush).call(this),this},t.brush.scale=function(e){return t.config.axis_rotated?this.y(e):this.x(e)}},C.initSubchart=function(){var t=this,e=t.config,i=t.context=t.svg.append("g").attr("transform",t.getTranslate("context")),n=e.subchart_show?"visible":"hidden";i.style("visibility",n),i.append("g").attr("clip-path",t.clipPathForSubchart).attr("class",r.chart),i.select("."+r.chart).append("g").attr("class",r.chartBars),i.select("."+r.chart).append("g").attr("class",r.chartLines),i.append("g").attr("clip-path",t.clipPath).attr("class",r.brush).call(t.brush),t.axes.subx=i.append("g").attr("class",r.axisX).attr("transform",t.getTranslate("subx")).attr("clip-path",e.axis_rotated?"":t.clipPathForXAxis).style("visibility",e.subchart_axis_x_show?n:"hidden")},C.updateTargetsForSubchart=function(t){var e,i=this,n=i.context,a=i.config,o=i.classChartBar.bind(i),s=i.classBars.bind(i),c=i.classChartLine.bind(i),d=i.classLines.bind(i),l=i.classAreas.bind(i);a.subchart_show&&(n.select("."+r.chartBars).selectAll("."+r.chartBar).data(t).attr("class",o).enter().append("g").style("opacity",0).attr("class",o).append("g").attr("class",s),(e=n.select("."+r.chartLines).selectAll("."+r.chartLine).data(t).attr("class",c).enter().append("g").style("opacity",0).attr("class",c)).append("g").attr("class",d),e.append("g").attr("class",l),n.selectAll("."+r.brush+" 
rect").attr(a.axis_rotated?"width":"height",a.axis_rotated?i.width2:i.height2))},C.updateBarForSubchart=function(t){var e=this;e.contextBar=e.context.selectAll("."+r.bars).selectAll("."+r.bar).data(e.barData.bind(e)),e.contextBar.enter().append("path").attr("class",e.classBar.bind(e)).style("stroke","none").style("fill",e.color),e.contextBar.style("opacity",e.initialOpacity.bind(e)),e.contextBar.exit().transition().duration(t).style("opacity",0).remove()},C.redrawBarForSubchart=function(t,e,i){(e?this.contextBar.transition(Math.random().toString()).duration(i):this.contextBar).attr("d",t).style("opacity",1)},C.updateLineForSubchart=function(t){var e=this;e.contextLine=e.context.selectAll("."+r.lines).selectAll("."+r.line).data(e.lineData.bind(e)),e.contextLine.enter().append("path").attr("class",e.classLine.bind(e)).style("stroke",e.color),e.contextLine.style("opacity",e.initialOpacity.bind(e)),e.contextLine.exit().transition().duration(t).style("opacity",0).remove()},C.redrawLineForSubchart=function(t,e,i){(e?this.contextLine.transition(Math.random().toString()).duration(i):this.contextLine).attr("d",t).style("opacity",1)},C.updateAreaForSubchart=function(t){var e=this,i=e.d3;e.contextArea=e.context.selectAll("."+r.areas).selectAll("."+r.area).data(e.lineData.bind(e)),e.contextArea.enter().append("path").attr("class",e.classArea.bind(e)).style("fill",e.color).style("opacity",function(){return e.orgAreaOpacity=+i.select(this).style("opacity"),0}),e.contextArea.style("opacity",0),e.contextArea.exit().transition().duration(t).style("opacity",0).remove()},C.redrawAreaForSubchart=function(t,e,i){(e?this.contextArea.transition(Math.random().toString()).duration(i):this.contextArea).attr("d",t).style("fill",this.color).style("opacity",this.orgAreaOpacity)},C.redrawSubchart=function(t,e,i,n,a,r,o){var s,c,d,l=this,u=l.d3,h=l.config;l.context.style("visibility",h.subchart_show?"visible":"hidden"),h.subchart_show&&(u.event&&"zoom"===u.event.type&&l.brush.extent(l.x.orgDomain()).update(),t&&(l.brush.empty()||l.brush.extent(l.x.orgDomain()).update(),s=l.generateDrawArea(a,!0),c=l.generateDrawBar(r,!0),d=l.generateDrawLine(o,!0),l.updateBarForSubchart(i),l.updateLineForSubchart(i),l.updateAreaForSubchart(i),l.redrawBarForSubchart(c,i,i),l.redrawLineForSubchart(d,i,i),l.redrawAreaForSubchart(s,i,i)))},C.redrawForBrush=function(){var t=this,e=t.x;t.redraw({withTransition:!1,withY:t.config.zoom_rescale,withSubchart:!1,withUpdateXDomain:!0,withDimension:!1}),t.config.subchart_onbrush.call(t.api,e.orgDomain())},C.transformContext=function(t,e){var i,n=this;e&&e.axisSubX?i=e.axisSubX:(i=n.context.select("."+r.axisX),t&&(i=i.transition())),n.context.attr("transform",n.getTranslate("context")),i.attr("transform",n.getTranslate("subx"))},C.getDefaultExtent=function(){var t=this,e=t.config,i=u(e.axis_x_extent)?e.axis_x_extent(t.getXDomain(t.data.targets)):e.axis_x_extent;return t.isTimeSeries()&&(i=[t.parseDate(i[0]),t.parseDate(i[1])]),i},C.initText=function(){var t=this;t.main.select("."+r.chart).append("g").attr("class",r.chartTexts),t.mainText=t.d3.selectAll([])},C.updateTargetsForText=function(t){var e=this,i=e.classChartText.bind(e),n=e.classTexts.bind(e),a=e.classFocus.bind(e);e.main.select("."+r.chartTexts).selectAll("."+r.chartText).data(t).attr("class",function(t){return i(t)+a(t)}).enter().append("g").attr("class",i).style("opacity",0).style("pointer-events","none").append("g").attr("class",n)},C.updateText=function(t){var 
e=this,i=e.config,n=e.barOrLineData.bind(e),a=e.classText.bind(e);e.mainText=e.main.selectAll("."+r.texts).selectAll("."+r.text).data(n),e.mainText.enter().append("text").attr("class",a).attr("text-anchor",function(t){return i.axis_rotated?t.value<0?"end":"start":"middle"}).style("stroke","none").style("fill",function(t){return e.color(t)}).style("fill-opacity",0),e.mainText.text(function(t,i,n){return e.dataLabelFormat(t.id)(t.value,t.id,i,n)}),e.mainText.exit().transition().duration(t).style("fill-opacity",0).remove()},C.redrawText=function(t,e,i,n){return[(n?this.mainText.transition():this.mainText).attr("x",t).attr("y",e).style("fill",this.color).style("fill-opacity",i?0:this.opacityForText.bind(this))]},C.getTextRect=function(t,e,i){var n,a=this.d3.select("body").append("div").classed("c3",!0),r=a.append("svg").style("visibility","hidden").style("position","fixed").style("top",0).style("left",0),o=this.d3.select(i).style("font");return r.selectAll(".dummy").data([t]).enter().append("text").classed(e||"",!0).style("font",o).text(t).each(function(){n=this.getBoundingClientRect()}),a.remove(),n},C.generateXYForText=function(t,e,i,n){var a=this,r=a.generateGetAreaPoints(t,!1),o=a.generateGetBarPoints(e,!1),s=a.generateGetLinePoints(i,!1),c=n?a.getXForText:a.getYForText;return function(t,e){var i=a.isAreaType(t)?r:a.isBarType(t)?o:s;return c.call(a,i(t,e),t,this)}},C.getXForText=function(t,e,i){var n,a,r=this,o=i.getBoundingClientRect();return r.config.axis_rotated?(a=r.isBarType(e)?4:6,n=t[2][1]+a*(e.value<0?-1:1)):n=r.hasType("bar")?(t[2][0]+t[0][0])/2:t[0][0],null===e.value&&(n>r.width?n=r.width-o.width:n<0&&(n=4)),n},C.getYForText=function(t,e,i){var n,a=this,r=i.getBoundingClientRect();return a.config.axis_rotated?n=(t[0][0]+t[2][0]+.6*r.height)/2:(n=t[2][1],e.value<0||0===e.value&&!a.hasPositiveValue?(n+=r.height,a.isBarType(e)&&a.isSafari()?n-=3:!a.isBarType(e)&&a.isChrome()&&(n+=3)):n+=a.isBarType(e)?-3:-6),null!==e.value||a.config.axis_rotated||(nthis.height&&(n=this.height-4)),n},C.initTitle=function(){var t=this;t.title=t.svg.append("text").text(t.config.title_text).attr("class",t.CLASS.title)},C.redrawTitle=function(){var t=this;t.title.attr("x",t.xForTitle.bind(t)).attr("y",t.yForTitle.bind(t))},C.xForTitle=function(){var t=this,e=t.config,i=e.title_position||"left";return i.indexOf("right")>=0?t.currentWidth-t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).width-e.title_padding.right:i.indexOf("center")>=0?(t.currentWidth-t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).width)/2:e.title_padding.left},C.yForTitle=function(){var t=this;return t.config.title_padding.top+t.getTextRect(t.title.node().textContent,t.CLASS.title,t.title.node()).height},C.getTitlePadding=function(){var t=this;return t.yForTitle()+t.config.title_padding.bottom},C.initTooltip=function(){var t,e=this,i=e.config;if(e.tooltip=e.selectChart.style("position","relative").append("div").attr("class",r.tooltipContainer).style("position","absolute").style("pointer-events","none").style("display","none"),i.tooltip_init_show){if(e.isTimeSeries()&&g(i.tooltip_init_x)){for(i.tooltip_init_x=e.parseDate(i.tooltip_init_x),t=0;t"+(o||0===o?""+o+"":"")),void 0!==(s=b(f(t[r].value,t[r].ratio,t[r].id,t[r].index,t))))){if(null===t[r].name)continue;c=b(g(t[r].name,t[r].ratio,t[r].id,t[r].index)),d=l.levelColor?l.levelColor(t[r].value):n(t[r].id),a+="",a+=""+c+"",a+=""+s+"",a+=""}return a+""},C.tooltipPosition=function(t,e,i,n){var 
a,r,o,s,c,d=this,l=d.config,u=d.d3,h=d.hasArcType(),g=u.mouse(n);return h?(r=(d.width-(d.isLegendRight?d.getLegendWidth():0))/2+g[0],s=d.height/2+g[1]+20):(a=d.getSvgLeft(!0),l.axis_rotated?(o=(r=a+g[0]+100)+e,c=d.currentWidth-d.getCurrentPaddingRight(),s=d.x(t[0].x)+20):(o=(r=a+d.getCurrentPaddingLeft(!0)+d.x(t[0].x)+20)+e,c=a+d.currentWidth-d.getCurrentPaddingRight(),s=g[1]+15),o>c&&(r-=o-c+20),s+i>d.currentHeight&&(s-=i+30)),s<0&&(s=0),{top:s,left:r}},C.showTooltip=function(t,e){var i,n,a,r=this,o=r.config,s=r.hasArcType(),c=t.filter(function(t){return t&&l(t.value)}),d=o.tooltip_position||C.tooltipPosition;0!==c.length&&o.tooltip_show&&(r.tooltip.html(o.tooltip_contents.call(r,t,r.axis.getXAxisTickFormat(),r.getYFormat(s),r.color)).style("display","block"),i=r.tooltip.property("offsetWidth"),n=r.tooltip.property("offsetHeight"),a=d.call(this,c,i,n,e),r.tooltip.style("top",a.top+"px").style("left",a.left+"px"))},C.hideTooltip=function(){this.tooltip.style("display","none")},C.setTargetType=function(t,e){var i=this,n=i.config;i.mapToTargetIds(t).forEach(function(t){i.withoutFadeIn[t]=e===n.data_types[t],n.data_types[t]=e}),t||(n.data_type=e)},C.hasType=function(t,e){var i=this,n=i.config.data_types,a=!1;return e=e||i.data.targets,e&&e.length?e.forEach(function(e){var i=n[e.id];(i&&i.indexOf(t)>=0||!i&&"line"===t)&&(a=!0)}):Object.keys(n).length?Object.keys(n).forEach(function(e){n[e]===t&&(a=!0)}):a=i.config.data_type===t,a},C.hasArcType=function(t){return this.hasType("pie",t)||this.hasType("donut",t)||this.hasType("gauge",t)},C.isLineType=function(t){var e=this.config,i=g(t)?t:t.id;return!e.data_types[i]||["line","spline","area","area-spline","step","area-step"].indexOf(e.data_types[i])>=0},C.isStepType=function(t){var e=g(t)?t:t.id;return["step","area-step"].indexOf(this.config.data_types[e])>=0},C.isSplineType=function(t){var e=g(t)?t:t.id;return["spline","area-spline"].indexOf(this.config.data_types[e])>=0},C.isAreaType=function(t){var e=g(t)?t:t.id;return["area","area-spline","area-step"].indexOf(this.config.data_types[e])>=0},C.isBarType=function(t){var e=g(t)?t:t.id;return"bar"===this.config.data_types[e]},C.isScatterType=function(t){var e=g(t)?t:t.id;return"scatter"===this.config.data_types[e]},C.isPieType=function(t){var e=g(t)?t:t.id;return"pie"===this.config.data_types[e]},C.isGaugeType=function(t){var e=g(t)?t:t.id;return"gauge"===this.config.data_types[e]},C.isDonutType=function(t){var e=g(t)?t:t.id;return"donut"===this.config.data_types[e]},C.isArcType=function(t){return this.isPieType(t)||this.isDonutType(t)||this.isGaugeType(t)},C.lineData=function(t){return this.isLineType(t)?[t]:[]},C.arcData=function(t){return this.isArcType(t.data)?[t]:[]},C.barData=function(t){return this.isBarType(t)?t.values:[]},C.lineOrScatterData=function(t){return this.isLineType(t)||this.isScatterType(t)?t.values:[]},C.barOrLineData=function(t){return this.isBarType(t)||this.isLineType(t)?t.values:[]},C.isInterpolationType=function(t){return["linear","linear-closed","basis","basis-open","basis-closed","bundle","cardinal","cardinal-open","cardinal-closed","monotone"].indexOf(t)>=0},C.isSafari=function(){var t=window.navigator.userAgent;return t.indexOf("Safari")>=0&&t.indexOf("Chrome")<0},C.isChrome=function(){return window.navigator.userAgent.indexOf("Chrome")>=0},C.initZoom=function(){var 
t,e=this,i=e.d3,n=e.config;e.zoom=i.behavior.zoom().on("zoomstart",function(){t=i.event.sourceEvent,e.zoom.altDomain=i.event.sourceEvent.altKey?e.x.orgDomain():null,n.zoom_onzoomstart.call(e.api,i.event.sourceEvent)}).on("zoom",function(){e.redrawForZoom.call(e)}).on("zoomend",function(){var a=i.event.sourceEvent;a&&t.clientX===a.clientX&&t.clientY===a.clientY||(e.redrawEventRect(),e.updateZoom(),n.zoom_onzoomend.call(e.api,e.x.orgDomain()))}),e.zoom.scale=function(t){return n.axis_rotated?this.y(t):this.x(t)},e.zoom.orgScaleExtent=function(){var t=n.zoom_extent?n.zoom_extent:[1,10];return[t[0],Math.max(e.getMaxDataCount()/t[1],t[1])]},e.zoom.updateScaleExtent=function(){var t=m(e.x.orgDomain())/m(e.getZoomDomain()),i=this.orgScaleExtent();return this.scaleExtent([i[0]*t,i[1]*t]),this}},C.getZoomDomain=function(){var t=this,e=t.config,i=t.d3;return[i.min([t.orgXDomain[0],e.zoom_x_min]),i.max([t.orgXDomain[1],e.zoom_x_max])]},C.updateZoom=function(){var t=this,e=t.config.zoom_enabled?t.zoom:function(){};t.main.select("."+r.zoomRect).call(e).on("dblclick.zoom",null),t.main.selectAll("."+r.eventRect).call(e).on("dblclick.zoom",null)},C.redrawForZoom=function(){var t=this,e=t.d3,i=t.config,n=t.zoom,a=t.x;if(i.zoom_enabled&&0!==t.filterTargetsToShow(t.data.targets).length){if("mousemove"===e.event.sourceEvent.type&&n.altDomain)return a.domain(n.altDomain),void n.scale(a).updateScaleExtent();t.isCategorized()&&a.orgDomain()[0]===t.orgXDomain[0]&&a.domain([t.orgXDomain[0]-1e-10,a.orgDomain()[1]]),t.redraw({withTransition:!1,withY:i.zoom_rescale,withSubchart:!1,withEventRect:!1,withDimension:!1}),"mousemove"===e.event.sourceEvent.type&&(t.cancelClick=!0),i.zoom_onzoom.call(t.api,a.orgDomain())}},L}); diff --git a/web/gui/lib/morris-0.5.1.min.js b/web/gui/lib/morris-0.5.1.min.js deleted file mode 100644 index 2e4983271..000000000 --- a/web/gui/lib/morris-0.5.1.min.js +++ /dev/null @@ -1,8 +0,0 @@ -/* @license -morris.js v0.5.0 -Copyright 2014 Olly Smith All rights reserved. -Licensed under the BSD-2-Clause License. 
-SPDX-License-Identifier: BSD-2-Clause -*/ -(function(){var a,b,c,d,e=[].slice,f=function(a,b){return function(){return a.apply(b,arguments)}},g={}.hasOwnProperty,h=function(a,b){function c(){this.constructor=a}for(var d in b)g.call(b,d)&&(a[d]=b[d]);return c.prototype=b.prototype,a.prototype=new c,a.__super__=b.prototype,a},i=[].indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(b in this&&this[b]===a)return b;return-1};b=window.Morris={},a=jQuery,b.EventEmitter=function(){function a(){}return a.prototype.on=function(a,b){return null==this.handlers&&(this.handlers={}),null==this.handlers[a]&&(this.handlers[a]=[]),this.handlers[a].push(b),this},a.prototype.fire=function(){var a,b,c,d,f,g,h;if(c=arguments[0],a=2<=arguments.length?e.call(arguments,1):[],null!=this.handlers&&null!=this.handlers[c]){for(g=this.handlers[c],h=[],d=0,f=g.length;f>d;d++)b=g[d],h.push(b.apply(null,a));return h}},a}(),b.commas=function(a){var b,c,d,e;return null!=a?(d=0>a?"-":"",b=Math.abs(a),c=Math.floor(b).toFixed(0),d+=c.replace(/(?=(?:\d{3})+$)(?!^)/g,","),e=b.toString(),e.length>c.length&&(d+=e.slice(c.length)),d):"-"},b.pad2=function(a){return(10>a?"0":"")+a},b.Grid=function(c){function d(b){this.resizeHandler=f(this.resizeHandler,this);var c=this;if(this.el="string"==typeof b.element?a(document.getElementById(b.element)):a(b.element),null==this.el||0===this.el.length)throw new Error("Graph container element not found");"static"===this.el.css("position")&&this.el.css("position","relative"),this.options=a.extend({},this.gridDefaults,this.defaults||{},b),"string"==typeof this.options.units&&(this.options.postUnits=b.units),this.raphael=new Raphael(this.el[0]),this.elementWidth=null,this.elementHeight=null,this.dirty=!1,this.selectFrom=null,this.init&&this.init(),this.setData(this.options.data),this.el.bind("mousemove",function(a){var b,d,e,f,g;return d=c.el.offset(),g=a.pageX-d.left,c.selectFrom?(b=c.data[c.hitTest(Math.min(g,c.selectFrom))]._x,e=c.data[c.hitTest(Math.max(g,c.selectFrom))]._x,f=e-b,c.selectionRect.attr({x:b,width:f})):c.fire("hovermove",g,a.pageY-d.top)}),this.el.bind("mouseleave",function(){return c.selectFrom&&(c.selectionRect.hide(),c.selectFrom=null),c.fire("hoverout")}),this.el.bind("touchstart touchmove touchend",function(a){var b,d;return d=a.originalEvent.touches[0]||a.originalEvent.changedTouches[0],b=c.el.offset(),c.fire("hovermove",d.pageX-b.left,d.pageY-b.top)}),this.el.bind("click",function(a){var b;return b=c.el.offset(),c.fire("gridclick",a.pageX-b.left,a.pageY-b.top)}),this.options.rangeSelect&&(this.selectionRect=this.raphael.rect(0,0,0,this.el.innerHeight()).attr({fill:this.options.rangeSelectColor,stroke:!1}).toBack().hide(),this.el.bind("mousedown",function(a){var b;return b=c.el.offset(),c.startRange(a.pageX-b.left)}),this.el.bind("mouseup",function(a){var b;return b=c.el.offset(),c.endRange(a.pageX-b.left),c.fire("hovermove",a.pageX-b.left,a.pageY-b.top)})),this.options.resize&&a(window).bind("resize",function(){return null!=c.timeoutId&&window.clearTimeout(c.timeoutId),c.timeoutId=window.setTimeout(c.resizeHandler,100)}),this.el.css("-webkit-tap-highlight-color","rgba(0,0,0,0)"),this.postInit&&this.postInit()}return h(d,c),d.prototype.gridDefaults={dateFormat:null,axes:!0,grid:!0,gridLineColor:"#aaa",gridStrokeWidth:.5,gridTextColor:"#888",gridTextSize:12,gridTextFamily:"sans-serif",gridTextWeight:"normal",hideHover:!1,yLabelFormat:null,xLabelAngle:0,numLines:5,padding:25,parseTime:!0,postUnits:"",preUnits:"",ymax:"auto",ymin:"auto 
0",goals:[],goalStrokeWidth:1,goalLineColors:["#666633","#999966","#cc6666","#663333"],events:[],eventStrokeWidth:1,eventLineColors:["#005a04","#ccffbb","#3a5f0b","#005502"],rangeSelect:null,rangeSelectColor:"#eef",resize:!1},d.prototype.setData=function(a,c){var d,e,f,g,h,i,j,k,l,m,n,o,p,q,r;return null==c&&(c=!0),this.options.data=a,null==a||0===a.length?(this.data=[],this.raphael.clear(),null!=this.hover&&this.hover.hide(),void 0):(o=this.cumulative?0:null,p=this.cumulative?0:null,this.options.goals.length>0&&(h=Math.min.apply(Math,this.options.goals),g=Math.max.apply(Math,this.options.goals),p=null!=p?Math.min(p,h):h,o=null!=o?Math.max(o,g):g),this.data=function(){var c,d,g;for(g=[],f=c=0,d=a.length;d>c;f=++c)j=a[f],i={src:j},i.label=j[this.options.xkey],this.options.parseTime?(i.x=b.parseDate(i.label),this.options.dateFormat?i.label=this.options.dateFormat(i.x):"number"==typeof i.label&&(i.label=new Date(i.label).toString())):(i.x=f,this.options.xLabelFormat&&(i.label=this.options.xLabelFormat(i))),l=0,i.y=function(){var a,b,c,d;for(c=this.options.ykeys,d=[],e=a=0,b=c.length;b>a;e=++a)n=c[e],q=j[n],"string"==typeof q&&(q=parseFloat(q)),null!=q&&"number"!=typeof q&&(q=null),null!=q&&(this.cumulative?l+=q:null!=o?(o=Math.max(q,o),p=Math.min(q,p)):o=p=q),this.cumulative&&null!=l&&(o=Math.max(l,o),p=Math.min(l,p)),d.push(q);return d}.call(this),g.push(i);return g}.call(this),this.options.parseTime&&(this.data=this.data.sort(function(a,b){return(a.x>b.x)-(b.x>a.x)})),this.xmin=this.data[0].x,this.xmax=this.data[this.data.length-1].x,this.events=[],this.options.events.length>0&&(this.events=this.options.parseTime?function(){var a,c,e,f;for(e=this.options.events,f=[],a=0,c=e.length;c>a;a++)d=e[a],f.push(b.parseDate(d));return f}.call(this):this.options.events,this.xmax=Math.max(this.xmax,Math.max.apply(Math,this.events)),this.xmin=Math.min(this.xmin,Math.min.apply(Math,this.events))),this.xmin===this.xmax&&(this.xmin-=1,this.xmax+=1),this.ymin=this.yboundary("min",p),this.ymax=this.yboundary("max",o),this.ymin===this.ymax&&(p&&(this.ymin-=1),this.ymax+=1),((r=this.options.axes)===!0||"both"===r||"y"===r||this.options.grid===!0)&&(this.options.ymax===this.gridDefaults.ymax&&this.options.ymin===this.gridDefaults.ymin?(this.grid=this.autoGridLines(this.ymin,this.ymax,this.options.numLines),this.ymin=Math.min(this.ymin,this.grid[0]),this.ymax=Math.max(this.ymax,this.grid[this.grid.length-1])):(k=(this.ymax-this.ymin)/(this.options.numLines-1),this.grid=function(){var a,b,c,d;for(d=[],m=a=b=this.ymin,c=this.ymax;k>0?c>=a:a>=c;m=a+=k)d.push(m);return d}.call(this))),this.dirty=!0,c?this.redraw():void 0)},d.prototype.yboundary=function(a,b){var c,d;return c=this.options["y"+a],"string"==typeof c?"auto"===c.slice(0,4)?c.length>5?(d=parseInt(c.slice(5),10),null==b?d:Math[a](b,d)):null!=b?b:0:parseInt(c,10):c},d.prototype.autoGridLines=function(a,b,c){var d,e,f,g,h,i,j,k,l;return h=b-a,l=Math.floor(Math.log(h)/Math.log(10)),j=Math.pow(10,l),e=Math.floor(a/j)*j,d=Math.ceil(b/j)*j,i=(d-e)/(c-1),1===j&&i>1&&Math.ceil(i)!==i&&(i=Math.ceil(i),d=e+i*(c-1)),0>e&&d>0&&(e=Math.floor(a/i)*i,d=Math.ceil(b/i)*i),1>i?(g=Math.floor(Math.log(i)/Math.log(10)),f=function(){var a,b;for(b=[],k=a=e;i>0?d>=a:a>=d;k=a+=i)b.push(parseFloat(k.toFixed(1-g)));return b}()):f=function(){var a,b;for(b=[],k=a=e;i>0?d>=a:a>=d;k=a+=i)b.push(k);return b}(),f},d.prototype._calc=function(){var a,b,c,d,e,f,g,h;return 
e=this.el.width(),c=this.el.height(),(this.elementWidth!==e||this.elementHeight!==c||this.dirty)&&(this.elementWidth=e,this.elementHeight=c,this.dirty=!1,this.left=this.options.padding,this.right=this.elementWidth-this.options.padding,this.top=this.options.padding,this.bottom=this.elementHeight-this.options.padding,((g=this.options.axes)===!0||"both"===g||"y"===g)&&(f=function(){var a,c,d,e;for(d=this.grid,e=[],a=0,c=d.length;c>a;a++)b=d[a],e.push(this.measureText(this.yAxisFormat(b)).width);return e}.call(this),this.left+=Math.max.apply(Math,f)),((h=this.options.axes)===!0||"both"===h||"x"===h)&&(a=function(){var a,b,c;for(c=[],d=a=0,b=this.data.length;b>=0?b>a:a>b;d=b>=0?++a:--a)c.push(this.measureText(this.data[d].text,-this.options.xLabelAngle).height);return c}.call(this),this.bottom-=Math.max.apply(Math,a)),this.width=Math.max(1,this.right-this.left),this.height=Math.max(1,this.bottom-this.top),this.dx=this.width/(this.xmax-this.xmin),this.dy=this.height/(this.ymax-this.ymin),this.calc)?this.calc():void 0},d.prototype.transY=function(a){return this.bottom-(a-this.ymin)*this.dy},d.prototype.transX=function(a){return 1===this.data.length?(this.left+this.right)/2:this.left+(a-this.xmin)*this.dx},d.prototype.redraw=function(){return this.raphael.clear(),this._calc(),this.drawGrid(),this.drawGoals(),this.drawEvents(),this.draw?this.draw():void 0},d.prototype.measureText=function(a,b){var c,d;return null==b&&(b=0),d=this.raphael.text(100,100,a).attr("font-size",this.options.gridTextSize).attr("font-family",this.options.gridTextFamily).attr("font-weight",this.options.gridTextWeight).rotate(b),c=d.getBBox(),d.remove(),c},d.prototype.yAxisFormat=function(a){return this.yLabelFormat(a)},d.prototype.yLabelFormat=function(a){return"function"==typeof this.options.yLabelFormat?this.options.yLabelFormat(a):""+this.options.preUnits+b.commas(a)+this.options.postUnits},d.prototype.drawGrid=function(){var a,b,c,d,e,f,g,h;if(this.options.grid!==!1||(e=this.options.axes)===!0||"both"===e||"y"===e){for(f=this.grid,h=[],c=0,d=f.length;d>c;c++)a=f[c],b=this.transY(a),((g=this.options.axes)===!0||"both"===g||"y"===g)&&this.drawYAxisLabel(this.left-this.options.padding/2,b,this.yAxisFormat(a)),this.options.grid?h.push(this.drawGridLine("M"+this.left+","+b+"H"+(this.left+this.width))):h.push(void 0);return h}},d.prototype.drawGoals=function(){var a,b,c,d,e,f,g;for(f=this.options.goals,g=[],c=d=0,e=f.length;e>d;c=++d)b=f[c],a=this.options.goalLineColors[c%this.options.goalLineColors.length],g.push(this.drawGoal(b,a));return g},d.prototype.drawEvents=function(){var a,b,c,d,e,f,g;for(f=this.events,g=[],c=d=0,e=f.length;e>d;c=++d)b=f[c],a=this.options.eventLineColors[c%this.options.eventLineColors.length],g.push(this.drawEvent(b,a));return g},d.prototype.drawGoal=function(a,b){return this.raphael.path("M"+this.left+","+this.transY(a)+"H"+this.right).attr("stroke",b).attr("stroke-width",this.options.goalStrokeWidth)},d.prototype.drawEvent=function(a,b){return this.raphael.path("M"+this.transX(a)+","+this.bottom+"V"+this.top).attr("stroke",b).attr("stroke-width",this.options.eventStrokeWidth)},d.prototype.drawYAxisLabel=function(a,b,c){return this.raphael.text(a,b,c).attr("font-size",this.options.gridTextSize).attr("font-family",this.options.gridTextFamily).attr("font-weight",this.options.gridTextWeight).attr("fill",this.options.gridTextColor).attr("text-anchor","end")},d.prototype.drawGridLine=function(a){return 
this.raphael.path(a).attr("stroke",this.options.gridLineColor).attr("stroke-width",this.options.gridStrokeWidth)},d.prototype.startRange=function(a){return this.hover.hide(),this.selectFrom=a,this.selectionRect.attr({x:a,width:0}).show()},d.prototype.endRange=function(a){var b,c;return this.selectFrom?(c=Math.min(this.selectFrom,a),b=Math.max(this.selectFrom,a),this.options.rangeSelect.call(this.el,{start:this.data[this.hitTest(c)].x,end:this.data[this.hitTest(b)].x}),this.selectFrom=null):void 0},d.prototype.resizeHandler=function(){return this.timeoutId=null,this.raphael.setSize(this.el.width(),this.el.height()),this.redraw()},d}(b.EventEmitter),b.parseDate=function(a){var b,c,d,e,f,g,h,i,j,k,l;return"number"==typeof a?a:(c=a.match(/^(\d+) Q(\d)$/),e=a.match(/^(\d+)-(\d+)$/),f=a.match(/^(\d+)-(\d+)-(\d+)$/),h=a.match(/^(\d+) W(\d+)$/),i=a.match(/^(\d+)-(\d+)-(\d+)[ T](\d+):(\d+)(Z|([+-])(\d\d):?(\d\d))?$/),j=a.match(/^(\d+)-(\d+)-(\d+)[ T](\d+):(\d+):(\d+(\.\d+)?)(Z|([+-])(\d\d):?(\d\d))?$/),c?new Date(parseInt(c[1],10),3*parseInt(c[2],10)-1,1).getTime():e?new Date(parseInt(e[1],10),parseInt(e[2],10)-1,1).getTime():f?new Date(parseInt(f[1],10),parseInt(f[2],10)-1,parseInt(f[3],10)).getTime():h?(k=new Date(parseInt(h[1],10),0,1),4!==k.getDay()&&k.setMonth(0,1+(4-k.getDay()+7)%7),k.getTime()+6048e5*parseInt(h[2],10)):i?i[6]?(g=0,"Z"!==i[6]&&(g=60*parseInt(i[8],10)+parseInt(i[9],10),"+"===i[7]&&(g=0-g)),Date.UTC(parseInt(i[1],10),parseInt(i[2],10)-1,parseInt(i[3],10),parseInt(i[4],10),parseInt(i[5],10)+g)):new Date(parseInt(i[1],10),parseInt(i[2],10)-1,parseInt(i[3],10),parseInt(i[4],10),parseInt(i[5],10)).getTime():j?(l=parseFloat(j[6]),b=Math.floor(l),d=Math.round(1e3*(l-b)),j[8]?(g=0,"Z"!==j[8]&&(g=60*parseInt(j[10],10)+parseInt(j[11],10),"+"===j[9]&&(g=0-g)),Date.UTC(parseInt(j[1],10),parseInt(j[2],10)-1,parseInt(j[3],10),parseInt(j[4],10),parseInt(j[5],10)+g,b,d)):new Date(parseInt(j[1],10),parseInt(j[2],10)-1,parseInt(j[3],10),parseInt(j[4],10),parseInt(j[5],10),b,d).getTime()):new Date(parseInt(a,10),0,1).getTime())},b.Hover=function(){function c(c){null==c&&(c={}),this.options=a.extend({},b.Hover.defaults,c),this.el=a("
"),this.el.hide(),this.options.parent.append(this.el)}return c.defaults={"class":"morris-hover morris-default-style"},c.prototype.update=function(a,b,c){return a?(this.html(a),this.show(),this.moveTo(b,c)):this.hide()},c.prototype.html=function(a){return this.el.html(a)},c.prototype.moveTo=function(a,b){var c,d,e,f,g,h;return g=this.options.parent.innerWidth(),f=this.options.parent.innerHeight(),d=this.el.outerWidth(),c=this.el.outerHeight(),e=Math.min(Math.max(0,a-d/2),g-d),null!=b?(h=b-c-10,0>h&&(h=b+10,h+c>f&&(h=f/2-c/2))):h=f/2-c/2,this.el.css({left:e+"px",top:parseInt(h)+"px"})},c.prototype.show=function(){return this.el.show()},c.prototype.hide=function(){return this.el.hide()},c}(),b.Line=function(a){function c(a){return this.hilight=f(this.hilight,this),this.onHoverOut=f(this.onHoverOut,this),this.onHoverMove=f(this.onHoverMove,this),this.onGridClick=f(this.onGridClick,this),this instanceof b.Line?(c.__super__.constructor.call(this,a),void 0):new b.Line(a)}return h(c,a),c.prototype.init=function(){return"always"!==this.options.hideHover?(this.hover=new b.Hover({parent:this.el}),this.on("hovermove",this.onHoverMove),this.on("hoverout",this.onHoverOut),this.on("gridclick",this.onGridClick)):void 0},c.prototype.defaults={lineWidth:3,pointSize:4,lineColors:["#0b62a4","#7A92A3","#4da74d","#afd8f8","#edc240","#cb4b4b","#9440ed"],pointStrokeWidths:[1],pointStrokeColors:["#ffffff"],pointFillColors:[],smooth:!0,xLabels:"auto",xLabelFormat:null,xLabelMargin:24,hideHover:!1},c.prototype.calc=function(){return this.calcPoints(),this.generatePaths()},c.prototype.calcPoints=function(){var a,b,c,d,e,f;for(e=this.data,f=[],c=0,d=e.length;d>c;c++)a=e[c],a._x=this.transX(a.x),a._y=function(){var c,d,e,f;for(e=a.y,f=[],c=0,d=e.length;d>c;c++)b=e[c],null!=b?f.push(this.transY(b)):f.push(b);return f}.call(this),f.push(a._ymax=Math.min.apply(Math,[this.bottom].concat(function(){var c,d,e,f;for(e=a._y,f=[],c=0,d=e.length;d>c;c++)b=e[c],null!=b&&f.push(b);return f}())));return f},c.prototype.hitTest=function(a){var b,c,d,e,f;if(0===this.data.length)return null;for(f=this.data.slice(1),b=d=0,e=f.length;e>d&&(c=f[b],!(a<(c._x+this.data[b]._x)/2));b=++d);return b},c.prototype.onGridClick=function(a,b){var c;return c=this.hitTest(a),this.fire("click",c,this.data[c].src,a,b)},c.prototype.onHoverMove=function(a){var b;return b=this.hitTest(a),this.displayHoverForRow(b)},c.prototype.onHoverOut=function(){return this.options.hideHover!==!1?this.displayHoverForRow(null):void 0},c.prototype.displayHoverForRow=function(a){var b;return null!=a?((b=this.hover).update.apply(b,this.hoverContentForRow(a)),this.hilight(a)):(this.hover.hide(),this.hilight())},c.prototype.hoverContentForRow=function(a){var b,c,d,e,f,g,h;for(d=this.data[a],b="
"+d.label+"
",h=d.y,c=f=0,g=h.length;g>f;c=++f)e=h[c],b+="
\n "+this.options.labels[c]+":\n "+this.yLabelFormat(e)+"\n
";return"function"==typeof this.options.hoverCallback&&(b=this.options.hoverCallback(a,this.options,b,d.src)),[b,d._x,d._ymax]},c.prototype.generatePaths=function(){var a,c,d,e;return this.paths=function(){var f,g,h,j;for(j=[],c=f=0,g=this.options.ykeys.length;g>=0?g>f:f>g;c=g>=0?++f:--f)e="boolean"==typeof this.options.smooth?this.options.smooth:(h=this.options.ykeys[c],i.call(this.options.smooth,h)>=0),a=function(){var a,b,e,f;for(e=this.data,f=[],a=0,b=e.length;b>a;a++)d=e[a],void 0!==d._y[c]&&f.push({x:d._x,y:d._y[c]});return f}.call(this),a.length>1?j.push(b.Line.createPath(a,e,this.bottom)):j.push(null);return j}.call(this)},c.prototype.draw=function(){var a;return((a=this.options.axes)===!0||"both"===a||"x"===a)&&this.drawXAxis(),this.drawSeries(),this.options.hideHover===!1?this.displayHoverForRow(this.data.length-1):void 0},c.prototype.drawXAxis=function(){var a,c,d,e,f,g,h,i,j,k,l=this;for(h=this.bottom+this.options.padding/2,f=null,e=null,a=function(a,b){var c,d,g,i,j;return c=l.drawXAxisLabel(l.transX(b),h,a),j=c.getBBox(),c.transform("r"+-l.options.xLabelAngle),d=c.getBBox(),c.transform("t0,"+d.height/2+"..."),0!==l.options.xLabelAngle&&(i=-.5*j.width*Math.cos(l.options.xLabelAngle*Math.PI/180),c.transform("t"+i+",0...")),d=c.getBBox(),(null==f||f>=d.x+d.width||null!=e&&e>=d.x)&&d.x>=0&&d.x+d.widtha;a++)g=c[a],d.push([g.label,g.x]);return d}.call(this),d.reverse(),k=[],i=0,j=d.length;j>i;i++)c=d[i],k.push(a(c[0],c[1]));return k},c.prototype.drawSeries=function(){var a,b,c,d,e,f;for(this.seriesPoints=[],a=b=d=this.options.ykeys.length-1;0>=d?0>=b:b>=0;a=0>=d?++b:--b)this._drawLineFor(a);for(f=[],a=c=e=this.options.ykeys.length-1;0>=e?0>=c:c>=0;a=0>=e?++c:--c)f.push(this._drawPointFor(a));return f},c.prototype._drawPointFor=function(a){var b,c,d,e,f,g;for(this.seriesPoints[a]=[],f=this.data,g=[],d=0,e=f.length;e>d;d++)c=f[d],b=null,null!=c._y[a]&&(b=this.drawLinePoint(c._x,c._y[a],this.colorFor(c,a,"point"),a)),g.push(this.seriesPoints[a].push(b));return g},c.prototype._drawLineFor=function(a){var b;return b=this.paths[a],null!==b?this.drawLinePath(b,this.colorFor(null,a,"line"),a):void 0},c.createPath=function(a,c,d){var e,f,g,h,i,j,k,l,m,n,o,p,q,r;for(k="",c&&(g=b.Line.gradients(a)),l={y:null},h=q=0,r=a.length;r>q;h=++q)e=a[h],null!=e.y&&(null!=l.y?c?(f=g[h],j=g[h-1],i=(e.x-l.x)/4,m=l.x+i,o=Math.min(d,l.y+i*j),n=e.x-i,p=Math.min(d,e.y-i*f),k+="C"+m+","+o+","+n+","+p+","+e.x+","+e.y):k+="L"+e.x+","+e.y:c&&null==g[h]||(k+="M"+e.x+","+e.y)),l=e;return k},c.gradients=function(a){var b,c,d,e,f,g,h,i;for(c=function(a,b){return(a.y-b.y)/(a.x-b.x)},i=[],d=g=0,h=a.length;h>g;d=++g)b=a[d],null!=b.y?(e=a[d+1]||{y:null},f=a[d-1]||{y:null},null!=f.y&&null!=e.y?i.push(c(f,e)):null!=f.y?i.push(c(f,b)):null!=e.y?i.push(c(b,e)):i.push(null)):i.push(null);return i},c.prototype.hilight=function(a){var b,c,d,e,f;if(null!==this.prevHilight&&this.prevHilight!==a)for(b=c=0,e=this.seriesPoints.length-1;e>=0?e>=c:c>=e;b=e>=0?++c:--c)this.seriesPoints[b][this.prevHilight]&&this.seriesPoints[b][this.prevHilight].animate(this.pointShrinkSeries(b));if(null!==a&&this.prevHilight!==a)for(b=d=0,f=this.seriesPoints.length-1;f>=0?f>=d:d>=f;b=f>=0?++d:--d)this.seriesPoints[b][a]&&this.seriesPoints[b][a].animate(this.pointGrowSeries(b));return this.prevHilight=a},c.prototype.colorFor=function(a,b,c){return"function"==typeof 
this.options.lineColors?this.options.lineColors.call(this,a,b,c):"point"===c?this.options.pointFillColors[b%this.options.pointFillColors.length]||this.options.lineColors[b%this.options.lineColors.length]:this.options.lineColors[b%this.options.lineColors.length]},c.prototype.drawXAxisLabel=function(a,b,c){return this.raphael.text(a,b,c).attr("font-size",this.options.gridTextSize).attr("font-family",this.options.gridTextFamily).attr("font-weight",this.options.gridTextWeight).attr("fill",this.options.gridTextColor)},c.prototype.drawLinePath=function(a,b,c){return this.raphael.path(a).attr("stroke",b).attr("stroke-width",this.lineWidthForSeries(c))},c.prototype.drawLinePoint=function(a,b,c,d){return this.raphael.circle(a,b,this.pointSizeForSeries(d)).attr("fill",c).attr("stroke-width",this.pointStrokeWidthForSeries(d)).attr("stroke",this.pointStrokeColorForSeries(d))},c.prototype.pointStrokeWidthForSeries=function(a){return this.options.pointStrokeWidths[a%this.options.pointStrokeWidths.length]},c.prototype.pointStrokeColorForSeries=function(a){return this.options.pointStrokeColors[a%this.options.pointStrokeColors.length]},c.prototype.lineWidthForSeries=function(a){return this.options.lineWidth instanceof Array?this.options.lineWidth[a%this.options.lineWidth.length]:this.options.lineWidth},c.prototype.pointSizeForSeries=function(a){return this.options.pointSize instanceof Array?this.options.pointSize[a%this.options.pointSize.length]:this.options.pointSize},c.prototype.pointGrowSeries=function(a){return Raphael.animation({r:this.pointSizeForSeries(a)+3},25,"linear")},c.prototype.pointShrinkSeries=function(a){return Raphael.animation({r:this.pointSizeForSeries(a)},25,"linear")},c}(b.Grid),b.labelSeries=function(c,d,e,f,g){var h,i,j,k,l,m,n,o,p,q,r;if(j=200*(d-c)/e,i=new Date(c),n=b.LABEL_SPECS[f],void 0===n)for(r=b.AUTO_LABEL_ORDER,p=0,q=r.length;q>p;p++)if(k=r[p],m=b.LABEL_SPECS[k],j>=m.span){n=m;break}for(void 0===n&&(n=b.LABEL_SPECS.second),g&&(n=a.extend({},n,{fmt:g})),h=n.start(i),l=[];(o=h.getTime())<=d;)o>=c&&l.push([n.fmt(h),o]),n.incr(h);return l},c=function(a){return{span:60*a*1e3,start:function(a){return new Date(a.getFullYear(),a.getMonth(),a.getDate(),a.getHours())},fmt:function(a){return""+b.pad2(a.getHours())+":"+b.pad2(a.getMinutes())},incr:function(b){return b.setUTCMinutes(b.getUTCMinutes()+a)}}},d=function(a){return{span:1e3*a,start:function(a){return new Date(a.getFullYear(),a.getMonth(),a.getDate(),a.getHours(),a.getMinutes())},fmt:function(a){return""+b.pad2(a.getHours())+":"+b.pad2(a.getMinutes())+":"+b.pad2(a.getSeconds())},incr:function(b){return b.setUTCSeconds(b.getUTCSeconds()+a)}}},b.LABEL_SPECS={decade:{span:1728e8,start:function(a){return new Date(a.getFullYear()-a.getFullYear()%10,0,1)},fmt:function(a){return""+a.getFullYear()},incr:function(a){return a.setFullYear(a.getFullYear()+10)}},year:{span:1728e7,start:function(a){return new Date(a.getFullYear(),0,1)},fmt:function(a){return""+a.getFullYear()},incr:function(a){return a.setFullYear(a.getFullYear()+1)}},month:{span:24192e5,start:function(a){return new Date(a.getFullYear(),a.getMonth(),1)},fmt:function(a){return""+a.getFullYear()+"-"+b.pad2(a.getMonth()+1)},incr:function(a){return a.setMonth(a.getMonth()+1)}},week:{span:6048e5,start:function(a){return new Date(a.getFullYear(),a.getMonth(),a.getDate())},fmt:function(a){return""+a.getFullYear()+"-"+b.pad2(a.getMonth()+1)+"-"+b.pad2(a.getDate())},incr:function(a){return a.setDate(a.getDate()+7)}},day:{span:864e5,start:function(a){return new 
Date(a.getFullYear(),a.getMonth(),a.getDate())},fmt:function(a){return""+a.getFullYear()+"-"+b.pad2(a.getMonth()+1)+"-"+b.pad2(a.getDate())},incr:function(a){return a.setDate(a.getDate()+1)}},hour:c(60),"30min":c(30),"15min":c(15),"10min":c(10),"5min":c(5),minute:c(1),"30sec":d(30),"15sec":d(15),"10sec":d(10),"5sec":d(5),second:d(1)},b.AUTO_LABEL_ORDER=["decade","year","month","week","day","hour","30min","15min","10min","5min","minute","30sec","15sec","10sec","5sec","second"],b.Area=function(c){function d(c){var f;return this instanceof b.Area?(f=a.extend({},e,c),this.cumulative=!f.behaveLikeLine,"auto"===f.fillOpacity&&(f.fillOpacity=f.behaveLikeLine?.8:1),d.__super__.constructor.call(this,f),void 0):new b.Area(c)}var e;return h(d,c),e={fillOpacity:"auto",behaveLikeLine:!1},d.prototype.calcPoints=function(){var a,b,c,d,e,f,g;for(f=this.data,g=[],d=0,e=f.length;e>d;d++)a=f[d],a._x=this.transX(a.x),b=0,a._y=function(){var d,e,f,g;for(f=a.y,g=[],d=0,e=f.length;e>d;d++)c=f[d],this.options.behaveLikeLine?g.push(this.transY(c)):(b+=c||0,g.push(this.transY(b)));return g}.call(this),g.push(a._ymax=Math.max.apply(Math,a._y));return g},d.prototype.drawSeries=function(){var a,b,c,d,e,f,g,h;for(this.seriesPoints=[],b=this.options.behaveLikeLine?function(){f=[];for(var a=0,b=this.options.ykeys.length-1;b>=0?b>=a:a>=b;b>=0?a++:a--)f.push(a);return f}.apply(this):function(){g=[];for(var a=e=this.options.ykeys.length-1;0>=e?0>=a:a>=0;0>=e?a++:a--)g.push(a);return g}.apply(this),h=[],c=0,d=b.length;d>c;c++)a=b[c],this._drawFillFor(a),this._drawLineFor(a),h.push(this._drawPointFor(a));return h},d.prototype._drawFillFor=function(a){var b;return b=this.paths[a],null!==b?(b+="L"+this.transX(this.xmax)+","+this.bottom+"L"+this.transX(this.xmin)+","+this.bottom+"Z",this.drawFilledPath(b,this.fillForSeries(a))):void 0},d.prototype.fillForSeries=function(a){var b;return b=Raphael.rgb2hsl(this.colorFor(this.data[a],a,"line")),Raphael.hsl(b.h,this.options.behaveLikeLine?.9*b.s:.75*b.s,Math.min(.98,this.options.behaveLikeLine?1.2*b.l:1.25*b.l))},d.prototype.drawFilledPath=function(a,b){return this.raphael.path(a).attr("fill",b).attr("fill-opacity",this.options.fillOpacity).attr("stroke","none")},d}(b.Line),b.Bar=function(c){function d(c){return this.onHoverOut=f(this.onHoverOut,this),this.onHoverMove=f(this.onHoverMove,this),this.onGridClick=f(this.onGridClick,this),this instanceof b.Bar?(d.__super__.constructor.call(this,a.extend({},c,{parseTime:!1})),void 0):new b.Bar(c)}return h(d,c),d.prototype.init=function(){return this.cumulative=this.options.stacked,"always"!==this.options.hideHover?(this.hover=new b.Hover({parent:this.el}),this.on("hovermove",this.onHoverMove),this.on("hoverout",this.onHoverOut),this.on("gridclick",this.onGridClick)):void 0},d.prototype.defaults={barSizeRatio:.75,barGap:3,barColors:["#0b62a4","#7a92a3","#4da74d","#afd8f8","#edc240","#cb4b4b","#9440ed"],barOpacity:1,barRadius:[0,0,0,0],xLabelMargin:50},d.prototype.calc=function(){var a;return this.calcBars(),this.options.hideHover===!1?(a=this.hover).update.apply(a,this.hoverContentForRow(this.data.length-1)):void 0},d.prototype.calcBars=function(){var a,b,c,d,e,f,g;for(f=this.data,g=[],a=d=0,e=f.length;e>d;a=++d)b=f[a],b._x=this.left+this.width*(a+.5)/this.data.length,g.push(b._y=function(){var a,d,e,f;for(e=b.y,f=[],a=0,d=e.length;d>a;a++)c=e[a],null!=c?f.push(this.transY(c)):f.push(null);return f}.call(this));return g},d.prototype.draw=function(){var 
a;return((a=this.options.axes)===!0||"both"===a||"x"===a)&&this.drawXAxis(),this.drawSeries()},d.prototype.drawXAxis=function(){var a,b,c,d,e,f,g,h,i,j,k,l,m;for(j=this.bottom+(this.options.xAxisLabelTopPadding||this.options.padding/2),g=null,f=null,m=[],a=k=0,l=this.data.length;l>=0?l>k:k>l;a=l>=0?++k:--k)h=this.data[this.data.length-1-a],b=this.drawXAxisLabel(h._x,j,h.label),i=b.getBBox(),b.transform("r"+-this.options.xLabelAngle),c=b.getBBox(),b.transform("t0,"+c.height/2+"..."),0!==this.options.xLabelAngle&&(e=-.5*i.width*Math.cos(this.options.xLabelAngle*Math.PI/180),b.transform("t"+e+",0...")),(null==g||g>=c.x+c.width||null!=f&&f>=c.x)&&c.x>=0&&c.x+c.width=0?this.transY(0):null,this.bars=function(){var h,l,p,q;for(p=this.data,q=[],d=h=0,l=p.length;l>h;d=++h)i=p[d],e=0,q.push(function(){var h,l,p,q;for(p=i._y,q=[],j=h=0,l=p.length;l>h;j=++h)n=p[j],null!==n?(o?(m=Math.min(n,o),b=Math.max(n,o)):(m=n,b=this.bottom),f=this.left+d*c+g,this.options.stacked||(f+=j*(a+this.options.barGap)),k=b-m,this.options.verticalGridCondition&&this.options.verticalGridCondition(i.x)&&this.drawBar(this.left+d*c,this.top,c,Math.abs(this.top-this.bottom),this.options.verticalGridColor,this.options.verticalGridOpacity,this.options.barRadius),this.options.stacked&&(m-=e),this.drawBar(f,m,a,k,this.colorFor(i,j,"bar"),this.options.barOpacity,this.options.barRadius),q.push(e+=k)):q.push(null);return q}.call(this));return q}.call(this)},d.prototype.colorFor=function(a,b,c){var d,e;return"function"==typeof this.options.barColors?(d={x:a.x,y:a.y[b],label:a.label},e={index:b,key:this.options.ykeys[b],label:this.options.labels[b]},this.options.barColors.call(this,d,e,c)):this.options.barColors[b%this.options.barColors.length]},d.prototype.hitTest=function(a){return 0===this.data.length?null:(a=Math.max(Math.min(a,this.right),this.left),Math.min(this.data.length-1,Math.floor((a-this.left)/(this.width/this.data.length))))},d.prototype.onGridClick=function(a,b){var c;return c=this.hitTest(a),this.fire("click",c,this.data[c].src,a,b)},d.prototype.onHoverMove=function(a){var b,c;return b=this.hitTest(a),(c=this.hover).update.apply(c,this.hoverContentForRow(b))},d.prototype.onHoverOut=function(){return this.options.hideHover!==!1?this.hover.hide():void 0},d.prototype.hoverContentForRow=function(a){var b,c,d,e,f,g,h,i;for(d=this.data[a],b="
"+d.label+"
",i=d.y,c=g=0,h=i.length;h>g;c=++g)f=i[c],b+="
\n "+this.options.labels[c]+":\n "+this.yLabelFormat(f)+"\n
";return"function"==typeof this.options.hoverCallback&&(b=this.options.hoverCallback(a,this.options,b,d.src)),e=this.left+(a+.5)*this.width/this.data.length,[b,e]},d.prototype.drawXAxisLabel=function(a,b,c){var d;return d=this.raphael.text(a,b,c).attr("font-size",this.options.gridTextSize).attr("font-family",this.options.gridTextFamily).attr("font-weight",this.options.gridTextWeight).attr("fill",this.options.gridTextColor)},d.prototype.drawBar=function(a,b,c,d,e,f,g){var h,i;return h=Math.max.apply(Math,g),i=0===h||h>d?this.raphael.rect(a,b,c,d):this.raphael.path(this.roundedRect(a,b,c,d,g)),i.attr("fill",e).attr("fill-opacity",f).attr("stroke","none")},d.prototype.roundedRect=function(a,b,c,d,e){return null==e&&(e=[0,0,0,0]),["M",a,e[0]+b,"Q",a,b,a+e[0],b,"L",a+c-e[1],b,"Q",a+c,b,a+c,b+e[1],"L",a+c,b+d-e[2],"Q",a+c,b+d,a+c-e[2],b+d,"L",a+e[3],b+d,"Q",a,b+d,a,b+d-e[3],"Z"]},d}(b.Grid),b.Donut=function(c){function d(c){this.resizeHandler=f(this.resizeHandler,this),this.select=f(this.select,this),this.click=f(this.click,this);var d=this;if(!(this instanceof b.Donut))return new b.Donut(c);if(this.options=a.extend({},this.defaults,c),this.el="string"==typeof c.element?a(document.getElementById(c.element)):a(c.element),null===this.el||0===this.el.length)throw new Error("Graph placeholder not found.");void 0!==c.data&&0!==c.data.length&&(this.raphael=new Raphael(this.el[0]),this.options.resize&&a(window).bind("resize",function(){return null!=d.timeoutId&&window.clearTimeout(d.timeoutId),d.timeoutId=window.setTimeout(d.resizeHandler,100)}),this.setData(c.data))}return h(d,c),d.prototype.defaults={colors:["#0B62A4","#3980B5","#679DC6","#95BBD7","#B0CCE1","#095791","#095085","#083E67","#052C48","#042135"],backgroundColor:"#FFFFFF",labelColor:"#000000",formatter:b.commas,resize:!1},d.prototype.redraw=function(){var a,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x;for(this.raphael.clear(),c=this.el.width()/2,d=this.el.height()/2,n=(Math.min(c,d)-10)/3,l=0,u=this.values,o=0,r=u.length;r>o;o++)m=u[o],l+=m;for(i=5/(2*n),a=1.9999*Math.PI-i*this.data.length,g=0,f=0,this.segments=[],v=this.values,e=p=0,s=v.length;s>p;e=++p)m=v[e],j=g+i+a*(m/l),k=new b.DonutSegment(c,d,2*n,n,g,j,this.data[e].color||this.options.colors[f%this.options.colors.length],this.options.backgroundColor,f,this.raphael),k.render(),this.segments.push(k),k.on("hover",this.select),k.on("click",this.click),g=j,f+=1;for(this.text1=this.drawEmptyDonutLabel(c,d-10,this.options.labelColor,15,800),this.text2=this.drawEmptyDonutLabel(c,d+10,this.options.labelColor,14),h=Math.max.apply(Math,this.values),f=0,w=this.values,x=[],q=0,t=w.length;t>q;q++){if(m=w[q],m===h){this.select(f); -break}x.push(f+=1)}return x},d.prototype.setData=function(a){var b;return this.data=a,this.values=function(){var a,c,d,e;for(d=this.data,e=[],a=0,c=d.length;c>a;a++)b=d[a],e.push(parseFloat(b.value));return e}.call(this),this.redraw()},d.prototype.click=function(a){return this.fire("click",a,this.data[a])},d.prototype.select=function(a){var b,c,d,e,f,g;for(g=this.segments,e=0,f=g.length;f>e;e++)c=g[e],c.deselect();return d=this.segments[a],d.select(),b=this.data[a],this.setLabels(b.label,this.options.formatter(b.value,b))},d.prototype.setLabels=function(a,b){var c,d,e,f,g,h,i,j;return 
c=2*(Math.min(this.el.width()/2,this.el.height()/2)-10)/3,f=1.8*c,e=c/2,d=c/3,this.text1.attr({text:a,transform:""}),g=this.text1.getBBox(),h=Math.min(f/g.width,e/g.height),this.text1.attr({transform:"S"+h+","+h+","+(g.x+g.width/2)+","+(g.y+g.height)}),this.text2.attr({text:b,transform:""}),i=this.text2.getBBox(),j=Math.min(f/i.width,d/i.height),this.text2.attr({transform:"S"+j+","+j+","+(i.x+i.width/2)+","+i.y})},d.prototype.drawEmptyDonutLabel=function(a,b,c,d,e){var f;return f=this.raphael.text(a,b,"").attr("font-size",d).attr("fill",c),null!=e&&f.attr("font-weight",e),f},d.prototype.resizeHandler=function(){return this.timeoutId=null,this.raphael.setSize(this.el.width(),this.el.height()),this.redraw()},d}(b.EventEmitter),b.DonutSegment=function(a){function b(a,b,c,d,e,g,h,i,j,k){this.cx=a,this.cy=b,this.inner=c,this.outer=d,this.color=h,this.backgroundColor=i,this.index=j,this.raphael=k,this.deselect=f(this.deselect,this),this.select=f(this.select,this),this.sin_p0=Math.sin(e),this.cos_p0=Math.cos(e),this.sin_p1=Math.sin(g),this.cos_p1=Math.cos(g),this.is_long=g-e>Math.PI?1:0,this.path=this.calcSegment(this.inner+3,this.inner+this.outer-5),this.selectedPath=this.calcSegment(this.inner+3,this.inner+this.outer),this.hilight=this.calcArc(this.inner)}return h(b,a),b.prototype.calcArcPoints=function(a){return[this.cx+a*this.sin_p0,this.cy+a*this.cos_p0,this.cx+a*this.sin_p1,this.cy+a*this.cos_p1]},b.prototype.calcSegment=function(a,b){var c,d,e,f,g,h,i,j,k,l;return k=this.calcArcPoints(a),c=k[0],e=k[1],d=k[2],f=k[3],l=this.calcArcPoints(b),g=l[0],i=l[1],h=l[2],j=l[3],"M"+c+","+e+("A"+a+","+a+",0,"+this.is_long+",0,"+d+","+f)+("L"+h+","+j)+("A"+b+","+b+",0,"+this.is_long+",1,"+g+","+i)+"Z"},b.prototype.calcArc=function(a){var b,c,d,e,f;return f=this.calcArcPoints(a),b=f[0],d=f[1],c=f[2],e=f[3],"M"+b+","+d+("A"+a+","+a+",0,"+this.is_long+",0,"+c+","+e)},b.prototype.render=function(){var a=this;return this.arc=this.drawDonutArc(this.hilight,this.color),this.seg=this.drawDonutSegment(this.path,this.color,this.backgroundColor,function(){return a.fire("hover",a.index)},function(){return a.fire("click",a.index)})},b.prototype.drawDonutArc=function(a,b){return this.raphael.path(a).attr({stroke:b,"stroke-width":2,opacity:0})},b.prototype.drawDonutSegment=function(a,b,c,d,e){return this.raphael.path(a).attr({fill:b,stroke:c,"stroke-width":3}).hover(d).click(e)},b.prototype.select=function(){return this.selected?void 0:(this.seg.animate({path:this.selectedPath},150,"<>"),this.arc.animate({opacity:1},150,"<>"),this.selected=!0)},b.prototype.deselect=function(){return this.selected?(this.seg.animate({path:this.path},150,"<>"),this.arc.animate({opacity:0},150,"<>"),this.selected=!1):void 0},b}(b.EventEmitter)}).call(this); diff --git a/web/gui/lib/raphael-2.2.4-min.js b/web/gui/lib/raphael-2.2.4-min.js deleted file mode 100644 index ea2d8c190..000000000 --- a/web/gui/lib/raphael-2.2.4-min.js +++ /dev/null @@ -1,4 +0,0 @@ -// SPDX-License-Identifier: MIT -!function t(e,r){"object"==typeof exports&&"object"==typeof module?module.exports=r():"function"==typeof define&&define.amd?define([],r):"object"==typeof exports?exports.Raphael=r():e.Raphael=r()}(this,function(){return function(t){function e(i){if(r[i])return r[i].exports;var n=r[i]={exports:{},id:i,loaded:!1};return t[i].call(n.exports,n,n.exports,e),n.loaded=!0,n.exports}var r={};return e.m=t,e.c=r,e.p="",e(0)}([function(t,e,r){var i,n;i=[r(1),r(3),r(4)],n=function(t){return t}.apply(e,i),!(void 0!==n&&(t.exports=n))},function(t,e,r){var 
i,n;i=[r(2)],n=function(t){function e(r){if(e.is(r,"function"))return w?r():t.on("raphael.DOMload",r);if(e.is(r,Q))return e._engine.create[z](e,r.splice(0,3+e.is(r[0],$))).add(r);var i=Array.prototype.slice.call(arguments,0);if(e.is(i[i.length-1],"function")){var n=i.pop();return w?n.call(e._engine.create[z](e,i)):t.on("raphael.DOMload",function(){n.call(e._engine.create[z](e,i))})}return e._engine.create[z](e,arguments)}function r(t){if("function"==typeof t||Object(t)!==t)return t;var e=new t.constructor;for(var i in t)t[T](i)&&(e[i]=r(t[i]));return e}function i(t,e){for(var r=0,i=t.length;r=1e3&&delete o[l.shift()],l.push(s),o[s]=t[z](e,a),r?r(o[s]):o[s])}return n}function a(){return this.hex}function s(t,e){for(var r=[],i=0,n=t.length;n-2*!e>i;i+=2){var a=[{x:+t[i-2],y:+t[i-1]},{x:+t[i],y:+t[i+1]},{x:+t[i+2],y:+t[i+3]},{x:+t[i+4],y:+t[i+5]}];e?i?n-4==i?a[3]={x:+t[0],y:+t[1]}:n-2==i&&(a[2]={x:+t[0],y:+t[1]},a[3]={x:+t[2],y:+t[3]}):a[0]={x:+t[n-2],y:+t[n-1]}:n-4==i?a[3]=a[2]:i||(a[0]={x:+t[i],y:+t[i+1]}),r.push(["C",(-a[0].x+6*a[1].x+a[2].x)/6,(-a[0].y+6*a[1].y+a[2].y)/6,(a[1].x+6*a[2].x-a[3].x)/6,(a[1].y+6*a[2].y-a[3].y)/6,a[2].x,a[2].y])}return r}function o(t,e,r,i,n){var a=-3*e+9*r-9*i+3*n,s=t*a+6*e-12*r+6*i;return t*s-3*e+3*r}function l(t,e,r,i,n,a,s,l,h){null==h&&(h=1),h=h>1?1:h<0?0:h;for(var u=h/2,c=12,f=[-.1252,.1252,-.3678,.3678,-.5873,.5873,-.7699,.7699,-.9041,.9041,-.9816,.9816],p=[.2491,.2491,.2335,.2335,.2032,.2032,.1601,.1601,.1069,.1069,.0472,.0472],d=0,g=0;gd;)c/=2,f+=(pW(n,s)||W(e,i)W(a,o))){var l=(t*i-e*r)*(n-s)-(t-r)*(n*o-a*s),h=(t*i-e*r)*(a-o)-(e-i)*(n*o-a*s),u=(t-r)*(a-o)-(e-i)*(n-s);if(u){var c=l/u,f=h/u,p=+c.toFixed(2),d=+f.toFixed(2);if(!(p<+G(t,r).toFixed(2)||p>+W(t,r).toFixed(2)||p<+G(n,s).toFixed(2)||p>+W(n,s).toFixed(2)||d<+G(e,i).toFixed(2)||d>+W(e,i).toFixed(2)||d<+G(a,o).toFixed(2)||d>+W(a,o).toFixed(2)))return{x:c,y:f}}}}function c(t,e){return p(t,e)}function f(t,e){return p(t,e,1)}function p(t,r,i){var n=e.bezierBBox(t),a=e.bezierBBox(r);if(!e.isBBoxIntersect(n,a))return i?0:[];for(var s=l.apply(0,t),o=l.apply(0,r),h=W(~~(s/5),1),c=W(~~(o/5),1),f=[],p=[],d={},g=i?0:[],x=0;x=0&&S<=1.001&&T>=0&&T<=1.001&&(i?g++:g.push({x:C.x,y:C.y,t1:G(S,1),t2:G(T,1)}))}}return g}function d(t,r,i){t=e._path2curve(t),r=e._path2curve(r);for(var n,a,s,o,l,h,u,c,f,d,g=i?0:[],x=0,v=t.length;xi)return i;for(;ra?r=n:i=n,n=(i-r)/2+r}return n}var h=3*e,u=3*(i-e)-h,c=1-h-u,f=3*r,p=3*(n-r)-f,d=1-f-p;return o(t,1/(200*a))}function m(t,e){var r=[],i={};if(this.ms=e,this.times=1,t){for(var n in t)t[T](n)&&(i[ht(n)]=t[n],r.push(ht(n)));r.sort(Bt)}this.anim=i,this.top=r[r.length-1],this.percents=r}function b(r,i,n,a,s,o){n=ht(n);var l,h,u,c=[],f,p,d,x=r.ms,v={},m={},b={};if(a)for(w=0,B=Ee.length;wa*r.top){n=r.percents[w],p=r.percents[w-1]||0,x=x/r.top*(n-p),f=r.percents[w+1],l=r.anim[n];break}a&&i.attr(r.anim[r.percents[w]])}if(l){if(h)h.initstatus=a,h.start=new Date-h.ms*a;else{for(var C in l)if(l[T](C)&&(pt[T](C)||i.paper.customAttributes[T](C)))switch(v[C]=i.attr(C),null==v[C]&&(v[C]=ft[C]),m[C]=l[C],pt[C]){case $:b[C]=(m[C]-v[C])/x;break;case"colour":v[C]=e.getRGB(v[C]);var S=e.getRGB(m[C]);b[C]={r:(S.r-v[C].r)/x,g:(S.g-v[C].g)/x,b:(S.b-v[C].b)/x};break;case"path":var A=Qt(v[C],m[C]),E=A[1];for(v[C]=A[0],b[C]=[],w=0,B=v[C].length;w',Lt=Nt.firstChild,Lt.style.behavior="url(#default#VML)",!Lt||"object"!=typeof Lt.adj)return e.type=R;Nt=null}e.svg=!(e.vml="VML"==e.type),e._Paper=M,e.fn=N=M.prototype=e.prototype,e._id=0,e.is=function(t,e){return 
e=O.call(e),"finite"==e?!at[T](+t):"array"==e?t instanceof Array:"null"==e&&null===t||e==typeof t&&null!==t||"object"==e&&t===Object(t)||"array"==e&&Array.isArray&&Array.isArray(t)||tt.call(t).slice(8,-1).toLowerCase()==e},e.angle=function(t,r,i,n,a,s){if(null==a){var o=t-i,l=r-n;return o||l?(180+180*Y.atan2(-l,-o)/U+360)%360:0}return e.angle(t,r,a,s)-e.angle(i,n,a,s)},e.rad=function(t){return t%360*U/180},e.deg=function(t){return Math.round(180*t/U%360*1e3)/1e3},e.snapTo=function(t,r,i){if(i=e.is(i,"finite")?i:10,e.is(t,Q)){for(var n=t.length;n--;)if(H(t[n]-r)<=i)return t[n]}else{t=+t;var a=r%t;if(at-i)return r-a+t}return r};var zt=e.createUUID=function(t,e){return function(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(t,e).toUpperCase()}}(/[xy]/g,function(t){var e=16*Y.random()|0,r="x"==t?e:3&e|8;return r.toString(16)});e.setWindow=function(r){t("raphael.setWindow",e,A.win,r),A.win=r,A.doc=A.win.document,e._engine.initWin&&e._engine.initWin(A.win)};var Ft=function(t){if(e.vml){var r=/^\s+|\s+$/g,i;try{var a=new ActiveXObject("htmlfile");a.write(""),a.close(),i=a.body}catch(s){i=createPopup().document.body}var o=i.createTextRange();Ft=n(function(t){try{i.style.color=j(t).replace(r,R);var e=o.queryCommandValue("ForeColor");return e=(255&e)<<16|65280&e|(16711680&e)>>>16,"#"+("000000"+e.toString(16)).slice(-6)}catch(n){return"none"}})}else{var l=A.doc.createElement("i");l.title="Raphaël Colour Picker",l.style.display="none",A.doc.body.appendChild(l),Ft=n(function(t){return l.style.color=t,A.doc.defaultView.getComputedStyle(l,R).getPropertyValue("color")})}return Ft(t)},Pt=function(){return"hsb("+[this.h,this.s,this.b]+")"},Rt=function(){return"hsl("+[this.h,this.s,this.l]+")"},It=function(){return this.hex},jt=function(t,r,i){if(null==r&&e.is(t,"object")&&"r"in t&&"g"in t&&"b"in t&&(i=t.b,r=t.g,t=t.r),null==r&&e.is(t,Z)){var n=e.getRGB(t);t=n.r,r=n.g,i=n.b}return(t>1||r>1||i>1)&&(t/=255,r/=255,i/=255),[t,r,i]},qt=function(t,r,i,n){t*=255,r*=255,i*=255;var a={r:t,g:r,b:i,hex:e.rgb(t,r,i),toString:It};return e.is(n,"finite")&&(a.opacity=n),a};e.color=function(t){var r;return e.is(t,"object")&&"h"in t&&"s"in t&&"b"in t?(r=e.hsb2rgb(t),t.r=r.r,t.g=r.g,t.b=r.b,t.hex=r.hex):e.is(t,"object")&&"h"in t&&"s"in t&&"l"in t?(r=e.hsl2rgb(t),t.r=r.r,t.g=r.g,t.b=r.b,t.hex=r.hex):(e.is(t,"string")&&(t=e.getRGB(t)),e.is(t,"object")&&"r"in t&&"g"in t&&"b"in t?(r=e.rgb2hsl(t),t.h=r.h,t.s=r.s,t.l=r.l,r=e.rgb2hsb(t),t.v=r.b):(t={hex:"none"},t.r=t.g=t.b=t.h=t.s=t.v=t.l=-1)),t.toString=It,t},e.hsb2rgb=function(t,e,r,i){this.is(t,"object")&&"h"in t&&"s"in t&&"b"in t&&(r=t.b,e=t.s,i=t.o,t=t.h),t*=360;var n,a,s,o,l;return t=t%360/60,l=r*e,o=l*(1-H(t%2-1)),n=a=s=r-l,t=~~t,n+=[l,o,0,0,o,l][t],a+=[o,l,l,o,0,0][t],s+=[0,0,o,l,l,o][t],qt(n,a,s,i)},e.hsl2rgb=function(t,e,r,i){this.is(t,"object")&&"h"in t&&"s"in t&&"l"in t&&(r=t.l,e=t.s,t=t.h),(t>1||e>1||r>1)&&(t/=360,e/=100,r/=100),t*=360;var n,a,s,o,l;return t=t%360/60,l=2*e*(r<.5?r:1-r),o=l*(1-H(t%2-1)),n=a=s=r-l/2,t=~~t,n+=[l,o,0,0,o,l][t],a+=[o,l,l,o,0,0][t],s+=[0,0,o,l,l,o][t],qt(n,a,s,i)},e.rgb2hsb=function(t,e,r){r=jt(t,e,r),t=r[0],e=r[1],r=r[2];var i,n,a,s;return a=W(t,e,r),s=a-G(t,e,r),i=0==s?null:a==t?(e-r)/s:a==e?(r-t)/s+2:(t-e)/s+4,i=(i+360)%6*60/360,n=0==s?0:s/a,{h:i,s:n,b:a,toString:Pt}},e.rgb2hsl=function(t,e,r){r=jt(t,e,r),t=r[0],e=r[1],r=r[2];var i,n,a,s,o,l;return 
s=W(t,e,r),o=G(t,e,r),l=s-o,i=0==l?null:s==t?(e-r)/l:s==e?(r-t)/l+2:(t-e)/l+4,i=(i+360)%6*60/360,a=(s+o)/2,n=0==l?0:a<.5?l/(2*a):l/(2-2*a),{h:i,s:n,l:a,toString:Rt}},e._path2string=function(){return this.join(",").replace(vt,"$1")};var Dt=e._preload=function(t,e){var r=A.doc.createElement("img");r.style.cssText="position:absolute;left:-9999em;top:-9999em",r.onload=function(){e.call(this),this.onload=null,A.doc.body.removeChild(this)},r.onerror=function(){A.doc.body.removeChild(this)},A.doc.body.appendChild(r),r.src=t};e.getRGB=n(function(t){if(!t||(t=j(t)).indexOf("-")+1)return{r:-1,g:-1,b:-1,hex:"none",error:1,toString:a};if("none"==t)return{r:-1,g:-1,b:-1,hex:"none",toString:a};!(xt[T](t.toLowerCase().substring(0,2))||"#"==t.charAt())&&(t=Ft(t));var r,i,n,s,o,l,h,u=t.match(nt);return u?(u[2]&&(s=ut(u[2].substring(5),16),n=ut(u[2].substring(3,5),16),i=ut(u[2].substring(1,3),16)),u[3]&&(s=ut((l=u[3].charAt(3))+l,16),n=ut((l=u[3].charAt(2))+l,16),i=ut((l=u[3].charAt(1))+l,16)),u[4]&&(h=u[4][q](gt),i=ht(h[0]),"%"==h[0].slice(-1)&&(i*=2.55),n=ht(h[1]),"%"==h[1].slice(-1)&&(n*=2.55),s=ht(h[2]),"%"==h[2].slice(-1)&&(s*=2.55),"rgba"==u[1].toLowerCase().slice(0,4)&&(o=ht(h[3])),h[3]&&"%"==h[3].slice(-1)&&(o/=100)),u[5]?(h=u[5][q](gt),i=ht(h[0]),"%"==h[0].slice(-1)&&(i*=2.55),n=ht(h[1]),"%"==h[1].slice(-1)&&(n*=2.55),s=ht(h[2]),"%"==h[2].slice(-1)&&(s*=2.55),("deg"==h[0].slice(-3)||"°"==h[0].slice(-1))&&(i/=360),"hsba"==u[1].toLowerCase().slice(0,4)&&(o=ht(h[3])),h[3]&&"%"==h[3].slice(-1)&&(o/=100),e.hsb2rgb(i,n,s,o)):u[6]?(h=u[6][q](gt),i=ht(h[0]),"%"==h[0].slice(-1)&&(i*=2.55),n=ht(h[1]),"%"==h[1].slice(-1)&&(n*=2.55),s=ht(h[2]),"%"==h[2].slice(-1)&&(s*=2.55),("deg"==h[0].slice(-3)||"°"==h[0].slice(-1))&&(i/=360),"hsla"==u[1].toLowerCase().slice(0,4)&&(o=ht(h[3])),h[3]&&"%"==h[3].slice(-1)&&(o/=100),e.hsl2rgb(i,n,s,o)):(u={r:i,g:n,b:s,toString:a},u.hex="#"+(16777216|s|n<<8|i<<16).toString(16).slice(1),e.is(o,"finite")&&(u.opacity=o),u)):{r:-1,g:-1,b:-1,hex:"none",error:1,toString:a}},e),e.hsb=n(function(t,r,i){return e.hsb2rgb(t,r,i).hex}),e.hsl=n(function(t,r,i){return e.hsl2rgb(t,r,i).hex}),e.rgb=n(function(t,e,r){function i(t){return t+.5|0}return"#"+(16777216|i(r)|i(e)<<8|i(t)<<16).toString(16).slice(1)}),e.getColor=function(t){var e=this.getColor.start=this.getColor.start||{h:0,s:1,b:t||.75},r=this.hsb2rgb(e.h,e.s,e.b);return e.h+=.075,e.h>1&&(e.h=0,e.s-=.2,e.s<=0&&(this.getColor.start={h:0,s:1,b:e.b})),r.hex},e.getColor.reset=function(){delete this.start},e.parsePathString=function(t){if(!t)return null;var r=Vt(t);if(r.arr)return Yt(r.arr);var i={a:7,c:6,h:1,l:2,m:2,r:4,q:4,s:4,t:2,v:1,z:0},n=[];return e.is(t,Q)&&e.is(t[0],Q)&&(n=Yt(t)),n.length||j(t).replace(yt,function(t,e,r){var a=[],s=e.toLowerCase();if(r.replace(bt,function(t,e){e&&a.push(+e)}),"m"==s&&a.length>2&&(n.push([e][F](a.splice(0,2))),s="l",e="m"==e?"l":"L"),"r"==s)n.push([e][F](a));else for(;a.length>=i[s]&&(n.push([e][F](a.splice(0,i[s]))),i[s]););}),n.toString=e._path2string,r.arr=Yt(n),n},e.parseTransformString=n(function(t){if(!t)return null;var r={r:3,s:4,t:2,m:6},i=[];return e.is(t,Q)&&e.is(t[0],Q)&&(i=Yt(t)),i.length||j(t).replace(mt,function(t,e,r){var n=[],a=O.call(e);r.replace(bt,function(t,e){e&&n.push(+e)}),i.push([e][F](n))}),i.toString=e._path2string,i});var Vt=function(t){var e=Vt.ps=Vt.ps||{};return e[t]?e[t].sleep=100:e[t]={sleep:100},setTimeout(function(){for(var r in e)e[T](r)&&r!=t&&(e[r].sleep--,!e[r].sleep&&delete e[r])}),e[t]};e.findDotsAtSegment=function(t,e,r,i,n,a,s,o,l){var 
h=1-l,u=X(h,3),c=X(h,2),f=l*l,p=f*l,d=u*t+3*c*l*r+3*h*l*l*n+p*s,g=u*e+3*c*l*i+3*h*l*l*a+p*o,x=t+2*l*(r-t)+f*(n-2*r+t),v=e+2*l*(i-e)+f*(a-2*i+e),y=r+2*l*(n-r)+f*(s-2*n+r),m=i+2*l*(a-i)+f*(o-2*a+i),b=h*t+l*r,_=h*e+l*i,w=h*n+l*s,k=h*a+l*o,B=90-180*Y.atan2(x-y,v-m)/U;return(x>y||v=t.x&&e<=t.x2&&r>=t.y&&r<=t.y2},e.isBBoxIntersect=function(t,r){var i=e.isPointInsideBBox;return i(r,t.x,t.y)||i(r,t.x2,t.y)||i(r,t.x,t.y2)||i(r,t.x2,t.y2)||i(t,r.x,r.y)||i(t,r.x2,r.y)||i(t,r.x,r.y2)||i(t,r.x2,r.y2)||(t.xr.x||r.xt.x)&&(t.yr.y||r.yt.y)},e.pathIntersection=function(t,e){return d(t,e)},e.pathIntersectionNumber=function(t,e){return d(t,e,1)},e.isPointInsidePath=function(t,r,i){var n=e.pathBBox(t);return e.isPointInsideBBox(n,r,i)&&d(t,[["M",r,i],["H",n.x2+10]],1)%2==1},e._removedFactory=function(e){return function(){t("raphael.log",null,"Raphaël: you are calling to method “"+e+"” of removed object",e)}};var Ot=e.pathBBox=function(t){var e=Vt(t);if(e.bbox)return r(e.bbox);if(!t)return{x:0,y:0,width:0,height:0,x2:0,y2:0};t=Qt(t);for(var i=0,n=0,a=[],s=[],o,l=0,h=t.length;l1&&(b=Y.sqrt(b),r=b*r,i=b*i);var _=r*r,w=i*i,k=(s==o?-1:1)*Y.sqrt(H((_*w-_*m*m-w*y*y)/(_*m*m+w*y*y))),B=k*r*m/i+(t+l)/2,C=k*-i*y/r+(e+h)/2,S=Y.asin(((e-C)/i).toFixed(9)),T=Y.asin(((h-C)/i).toFixed(9));S=tT&&(S-=2*U),!o&&T>S&&(T-=2*U)}var A=T-S;if(H(A)>c){var E=T,M=l,N=h;T=S+c*(o&&T>S?1:-1),l=B+r*Y.cos(T),h=C+i*Y.sin(T),p=Ut(l,h,r,i,a,0,o,M,N,[T,E,B,C])}A=T-S;var L=Y.cos(S),z=Y.sin(S),P=Y.cos(T),R=Y.sin(T),I=Y.tan(A/4),j=4/3*r*I,D=4/3*i*I,V=[t,e],O=[t+j*z,e-D*L],W=[l+j*R,h-D*P],G=[l,h];if(O[0]=2*V[0]-O[0],O[1]=2*V[1]-O[1],u)return[O,W,G][F](p);p=[O,W,G][F](p).join()[q](",");for(var X=[],$=0,Z=p.length;$"1e12"&&(c=.5),H(f)>"1e12"&&(f=.5),c>0&&c<1&&(g=$t(t,e,r,i,n,a,s,o,c),d.push(g.x),p.push(g.y)),f>0&&f<1&&(g=$t(t,e,r,i,n,a,s,o,f),d.push(g.x),p.push(g.y)),l=a-2*i+e-(o-2*a+i),h=2*(i-e)-2*(a-i),u=e-i,c=(-h+Y.sqrt(h*h-4*l*u))/2/l,f=(-h-Y.sqrt(h*h-4*l*u))/2/l,H(c)>"1e12"&&(c=.5),H(f)>"1e12"&&(f=.5),c>0&&c<1&&(g=$t(t,e,r,i,n,a,s,o,c),d.push(g.x),p.push(g.y)),f>0&&f<1&&(g=$t(t,e,r,i,n,a,s,o,f),d.push(g.x),p.push(g.y)),{min:{x:G[z](0,d),y:G[z](0,p)},max:{x:W[z](0,d),y:W[z](0,p)}}}),Qt=e._path2curve=n(function(t,e){var r=!e&&Vt(t);if(!e&&r.curve)return Yt(r.curve);for(var i=Gt(t),n=e&&Gt(e),a={x:0,y:0,bx:0,by:0,X:0,Y:0,qx:null,qy:null},s={x:0,y:0,bx:0,by:0,X:0,Y:0,qx:null,qy:null},o=(function(t,e,r){var i,n,a={T:1,Q:1};if(!t)return["C",e.x,e.y,e.x,e.y,e.x,e.y];switch(!(t[0]in a)&&(e.qx=e.qy=null),t[0]){case"M":e.X=t[1],e.Y=t[2];break;case"A":t=["C"][F](Ut[z](0,[e.x,e.y][F](t.slice(1))));break;case"S":"C"==r||"S"==r?(i=2*e.x-e.bx,n=2*e.y-e.by):(i=e.x,n=e.y),t=["C",i,n][F](t.slice(1));break;case"T":"Q"==r||"T"==r?(e.qx=2*e.x-e.qx,e.qy=2*e.y-e.qy):(e.qx=e.x,e.qy=e.y),t=["C"][F](Xt(e.x,e.y,e.qx,e.qy,t[1],t[2]));break;case"Q":e.qx=t[1],e.qy=t[2],t=["C"][F](Xt(e.x,e.y,t[1],t[2],t[3],t[4]));break;case"L":t=["C"][F](Ht(e.x,e.y,t[1],t[2]));break;case"H":t=["C"][F](Ht(e.x,e.y,t[1],e.y));break;case"V":t=["C"][F](Ht(e.x,e.y,e.x,t[1]));break;case"Z":t=["C"][F](Ht(e.x,e.y,e.X,e.Y))}return t}),l=function(t,e){if(t[e].length>7){t[e].shift();for(var 
r=t[e];r.length;)u[e]="A",n&&(c[e]="A"),t.splice(e++,0,["C"][F](r.splice(0,6)));t.splice(e,1),g=W(i.length,n&&n.length||0)}},h=function(t,e,r,a,s){t&&e&&"M"==t[s][0]&&"M"!=e[s][0]&&(e.splice(s,0,["M",a.x,a.y]),r.bx=0,r.by=0,r.x=t[s][1],r.y=t[s][2],g=W(i.length,n&&n.length||0))},u=[],c=[],f="",p="",d=0,g=W(i.length,n&&n.length||0);dn){if(r&&!c.start){if(f=ke(s,o,l[1],l[2],l[3],l[4],l[5],l[6],n-p),u+=["C"+f.start.x,f.start.y,f.m.x,f.m.y,f.x,f.y],a)return u;c.start=u,u=["M"+f.x,f.y+"C"+f.n.x,f.n.y,f.end.x,f.end.y,l[5],l[6]].join(),p+=h,s=+l[5],o=+l[6];continue}if(!t&&!r)return f=ke(s,o,l[1],l[2],l[3],l[4],l[5],l[6],n-p),{x:f.x,y:f.y,alpha:f.alpha}}p+=h,s=+l[5],o=+l[6]}u+=l.shift()+l}return c.end=u,f=t?p:r?c:e.findDotsAtSegment(s,o,l[0],l[1],l[2],l[3],l[4],l[5],1),f.alpha&&(f={x:f.x,y:f.y,alpha:f.alpha}),f}},Ce=Be(1),Se=Be(),Te=Be(0,1);e.getTotalLength=Ce,e.getPointAtLength=Se,e.getSubpath=function(t,e,r){if(this.getTotalLength(t)-r<1e-6)return Te(t,e).end;var i=Te(t,r,1);return e?Te(i,e).end:i},ye.getTotalLength=function(){var t=this.getPath();if(t)return this.node.getTotalLength?this.node.getTotalLength():Ce(t)},ye.getPointAtLength=function(t){var e=this.getPath();if(e)return Se(e,t)},ye.getPath=function(){var t,r=e._getPath[this.type];if("text"!=this.type&&"set"!=this.type)return r&&(t=r(this)),t},ye.getSubpath=function(t,r){var i=this.getPath();if(i)return e.getSubpath(i,t,r)};var Ae=e.easing_formulas={linear:function(t){return t},"<":function(t){return X(t,1.7)},">":function(t){return X(t,.48)},"<>":function(t){var e=.48-t/1.04,r=Y.sqrt(.1734+e*e),i=r-e,n=X(H(i),1/3)*(i<0?-1:1),a=-r-e,s=X(H(a),1/3)*(a<0?-1:1),o=n+s+.5;return 3*(1-o)*o*o+o*o*o},backIn:function(t){var e=1.70158;return t*t*((e+1)*t-e)},backOut:function(t){t-=1;var e=1.70158;return t*t*((e+1)*t+e)+1},elastic:function(t){return t==!!t?t:X(2,-10*t)*Y.sin((t-.075)*(2*U)/.3)+1},bounce:function(t){var e=7.5625,r=2.75,i;return t<1/r?i=e*t*t:t<2/r?(t-=1.5/r,i=e*t*t+.75):t<2.5/r?(t-=2.25/r,i=e*t*t+.9375):(t-=2.625/r,i=e*t*t+.984375),i}};Ae.easeIn=Ae["ease-in"]=Ae["<"],Ae.easeOut=Ae["ease-out"]=Ae[">"],Ae.easeInOut=Ae["ease-in-out"]=Ae["<>"],Ae["back-in"]=Ae.backIn,Ae["back-out"]=Ae.backOut;var Ee=[],Me=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){setTimeout(t,16)},Ne=function(){for(var r=+new Date,i=0;i1&&!n.next){for(x in u)u[T](x)&&(g[x]=n.totalOrigin[x]);n.el.attr(g),b(n.anim,n.el,n.anim.percents[0],null,n.totalOrigin,n.repeat-1)}n.next&&!n.stop&&b(n.anim,n.el,n.next,null,n.totalOrigin,n.repeat)}}}Ee.length&&Me(Ne)},Le=function(t){return t>255?255:t<0?0:t};ye.animateWith=function(t,r,i,n,a,s){var o=this;if(o.removed)return s&&s.call(o),o;var l=i instanceof m?i:e.animation(i,n,a,s),h,u;b(l,o,l.percents[0],null,o.attr());for(var c=0,f=Ee.length;cl&&(l=u)}l+="%",!t[l].callback&&(t[l].callback=n)}return new m(t,r)},ye.animate=function(t,r,i,n){var a=this;if(a.removed)return n&&n.call(a),a;var s=t instanceof m?t:e.animation(t,r,i,n);return b(s,a,s.percents[0],null,a.attr()),a},ye.setTime=function(t,e){return t&&null!=e&&this.status(t,G(e,t.ms)/t.ms),this},ye.status=function(t,e){var r=[],i=0,n,a;if(null!=e)return b(t,this,-1,G(e,1)),this;for(n=Ee.length;i.5)-1;l(f-.5,2)+l(p-.5,2)>.25&&(p=a.sqrt(.25-l(f-.5,2))*n+.5)&&.5!=p&&(p=p.toFixed(5)-1e-5*n)}return c}),n=n.split(/\s*\-\s*/),"linear"==h){var b=n.shift();if(b=-i(b),isNaN(b))return null;var 
_=[0,0,a.cos(t.rad(b)),a.sin(t.rad(b))],w=1/(s(o(_[2]),o(_[3]))||1);_[2]*=w,_[3]*=w,_[2]<0&&(_[0]=-_[2],_[2]=0),_[3]<0&&(_[1]=-_[3],_[3]=0)}var k=t._parseDots(n);if(!k)return null;if(u=u.replace(/[\(\)\s,\xb0#]/g,"_"),e.gradient&&u!=e.gradient.id&&(g.defs.removeChild(e.gradient),delete e.gradient),!e.gradient){y=x(h+"Gradient",{id:u}),e.gradient=y,x(y,"radial"==h?{fx:f,fy:p}:{x1:_[0],y1:_[1],x2:_[2],y2:_[3],gradientTransform:e.matrix.invert()}),g.defs.appendChild(y);for(var B=0,C=k.length;B1?z.opacity/100:z.opacity -});case"stroke":z=t.getRGB(g),l.setAttribute(d,z.hex),"stroke"==d&&z[e]("opacity")&&x(l,{"stroke-opacity":z.opacity>1?z.opacity/100:z.opacity}),"stroke"==d&&i._.arrows&&("startString"in i._.arrows&&_(i,i._.arrows.startString),"endString"in i._.arrows&&_(i,i._.arrows.endString,1));break;case"gradient":("circle"==i.type||"ellipse"==i.type||"r"!=r(g).charAt())&&v(i,g);break;case"opacity":u.gradient&&!u[e]("stroke-opacity")&&x(l,{"stroke-opacity":g>1?g/100:g});case"fill-opacity":if(u.gradient){F=t._g.doc.getElementById(l.getAttribute("fill").replace(/^url\(#|\)$/g,c)),F&&(P=F.getElementsByTagName("stop"),x(P[P.length-1],{"stop-opacity":g}));break}default:"font-size"==d&&(g=n(g,10)+"px");var R=d.replace(/(\-.)/g,function(t){return t.substring(1).toUpperCase()});l.style[R]=g,i._.dirty=1,l.setAttribute(d,g)}}S(i,a),l.style.visibility=f},C=1.2,S=function(i,a){if("text"==i.type&&(a[e]("text")||a[e]("font")||a[e]("font-size")||a[e]("x")||a[e]("y"))){var s=i.attrs,o=i.node,l=o.firstChild?n(t._g.doc.defaultView.getComputedStyle(o.firstChild,c).getPropertyValue("font-size"),10):10;if(a[e]("text")){for(s.text=a.text;o.firstChild;)o.removeChild(o.firstChild);for(var h=r(a.text).split("\n"),u=[],f,p=0,d=h.length;p"));var Z=X.getBoundingClientRect();m.W=f.w=(Z.right-Z.left)/U,m.H=f.h=(Z.bottom-Z.top)/U,m.X=f.x,m.Y=f.y+m.H/2,("x"in l||"y"in l)&&(m.path.v=t.format("m{0},{1}l{2},{1}",a(f.x*b),a(f.y*b),a(f.x*b)+1));for(var Q=["x","y","text","font","font-family","font-weight","font-style","font-size"],J=0,K=Q.length;J.25&&(r=n.sqrt(.25-l(e-.5,2))*(2*(r>.5)-1)+.5),f=e+p+r),d}),a=a.split(/\s*\-\s*/),"linear"==c){var g=a.shift();if(g=-i(g),isNaN(g))return null}var x=t._parseDots(a);if(!x)return null;if(e=e.shape||e.node,x.length){e.removeChild(s),s.on=!0,s.method="none",s.color=x[0].color,s.color2=x[x.length-1].color;for(var v=[],y=0,m=x.length;y')}}catch(r){N=function(t){return e.createElement("<"+t+' xmlns="urn:schemas-microsoft.com:vml" class="rvml">')}}},t._engine.initWin(t._g.win),t._engine.create=function(){var e=t._getContainer.apply(0,arguments),r=e.container,i=e.height,n,a=e.width,s=e.x,o=e.y;if(!r)throw new Error("VML container not found.");var l=new t._Paper,h=l.canvas=t._g.doc.createElement("div"),u=h.style;return s=s||0,o=o||0,a=a||512,i=i||342,l.width=a,l.height=i,a==+a&&(a+="px"),i==+i&&(i+="px"),l.coordsize=1e3*b+p+1e3*b,l.coordorigin="0 0",l.span=t._g.doc.createElement("span"),l.span.style.cssText="position:absolute;left:-9999em;top:-9999em;padding:0;margin:0;line-height:1;",h.appendChild(l.span),u.cssText=t.format("top:0;left:0;width:{0};height:{1};display:inline-block;position:relative;clip:rect(0 {0} {1} 
0);overflow:hidden",a,i),1==r?(t._g.doc.body.appendChild(h),u.left=s+"px",u.top=o+"px",u.position="absolute"):r.firstChild?r.insertBefore(h,r.firstChild):r.appendChild(h),l.renderfix=function(){},l},t.prototype.clear=function(){t.eve("raphael.clear",this),this.canvas.innerHTML=d,this.span=t._g.doc.createElement("span"),this.span.style.cssText="position:absolute;left:-9999em;top:-9999em;padding:0;margin:0;line-height:1;display:inline;",this.canvas.appendChild(this.span),this.bottom=this.top=null},t.prototype.remove=function(){t.eve("raphael.remove",this),this.canvas.parentNode.removeChild(this.canvas);for(var e in this)this[e]="function"==typeof this[e]?t._removedFactory(e):null;return!0};var L=t.st;for(var z in M)M[e](z)&&!L[e](z)&&(L[z]=function(t){return function(){var e=arguments;return this.forEach(function(r){r[t].apply(r,e)})}}(z))}}.apply(e,i),!(void 0!==n&&(t.exports=n))}])}); diff --git a/web/gui/main.css b/web/gui/main.css index 3e2c4bfc3..b1fb94fdd 100644 --- a/web/gui/main.css +++ b/web/gui/main.css @@ -1,6 +1,6 @@ /* force the vertical window scrollbar */ html { - overflow-y: scroll; + overflow-y: hidden; } /* prevent body from hiding under the navbar */ @@ -532,7 +532,11 @@ body.modal-open { transition: 0s } -/* --- */ +/* -------------------------------------------------------------------------- */ + +#my-netdata-dropdown-content { + width: 500px; +} #my-netdata-dropdown-content a:hover { color: #fff; @@ -551,6 +555,10 @@ body.modal-open { font-weight: 300; } +#my-netdata-dropdown-content .agent-item .__title { + cursor: pointer; +} + #my-netdata-dropdown-content .agent-item:hover { background-color: #262626; } @@ -571,7 +579,7 @@ body.modal-open { #my-netdata-dropdown-content .agent-item :nth-child(2) { min-width: 420px; - line-height: 14px; + line-height: 32px; } .agent-item--separated { @@ -605,9 +613,57 @@ body.modal-open { } #my-netdata-dropdown-content.theme-white a { - color: #555; + color: #888; } #my-netdata-dropdown-content.theme-white a:hover { color: #000; } + +#sign-in-iframe { + background-color: #fff; + border: none; +} + +#cloud-menu { +} + +#cloud-menu.dropdown-menu > li > a { + text-align: left; +} + +#my-netdata-menu-filter-input { + color: #fff; + border: none; + background-color: #4b4f55; + width: 472px; + margin: 5px 14px; + margin-right: 0; + padding: 2px 5px; + outline: none; +} + +#my-netdata-menu-filter-input::placeholder { + opacity: 0.7; +} + +#my-netdata-dropdown-content.theme-white #my-netdata-menu-filter-input { + background-color: #e7e7e7; + color: #555; +} + +.filter-control { + position: relative; +} + +.filter-control .filter-control__clear { + cursor: pointer; + position: absolute; + top: 7px; + right: 19px; +} + +#hostname { + font-size: 18px; +} + diff --git a/web/gui/main.js b/web/gui/main.js index a04f406bd..b6478f6cf 100644 --- a/web/gui/main.js +++ b/web/gui/main.js @@ -1,5 +1,8 @@ // Main JavaScript file for the Netdata GUI. 
+// Codacy declarations +/* global NETDATA */ + // netdata snapshot data var netdataSnapshotData = null; @@ -453,7 +456,7 @@ function saveObjectToClient(data, filename) { saveTextToClient(JSON.stringify(data), filename); } -// -------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // registry call back to render my-netdata menu function toggleExpandIcon(svgEl) { @@ -476,21 +479,8 @@ function toggleAgentItem(e, guid) { } } -// TODO: consider renaming to `truncateString` - -/// Enforces a maximum string length while retaining the prefix and the postfix of -/// the string. -function clipString(str, maxLength) { - if (str.length <= maxLength) { - return str; - } - - const spanLength = Math.floor((maxLength - 3) / 2); - return `${str.substring(0, spanLength)}...${str.substring(str.length - spanLength)}`; -} - -// When you stream metrics from netdata to netdata, the recieving netdata now -// has multiple host databases. It's own, and multiple mirrored. Mirrored databases +// When you stream metrics from netdata to netdata, the receiving netdata now +// has multiple host databases. Its own, and multiple mirrored. Mirrored databases // can be accessed with function renderStreamedHosts(options) { let html = `
Databases streamed to this agent
`; @@ -512,12 +502,22 @@ function renderStreamedHosts(options) { return naturalSortCompare(a.hostname, b.hostname); }); - for (const s of sorted) { + let displayedDatabases = false; + + for (var s of sorted) { let url, icon; const hostname = s.hostname; + if (myNetdataMenuFilterValue !== "") { + if (!hostname.includes(myNetdataMenuFilterValue)) { + continue; + } + } + + displayedDatabases = true; + if (hostname === master) { - url = `base${'/'}`; + url = `${base}/`; icon = 'home'; } else { url = `${base}/host/${hostname}/`; @@ -528,18 +528,33 @@ function renderStreamedHosts(options) { `` ) } + if (!displayedDatabases) { + html += ( + `
+ + no databases match the filter criteria. +
` + ) + } + return html; } function renderMachines(machinesArray) { - let html = `
My netdata agents
`; + // let html = isSignedIn() + // ? `
My nodes
` + // : `
My nodes
`; + + let html = `
My nodes
`; if (machinesArray === null) { let ret = loadLocalStorage("registryCallback"); @@ -550,6 +565,9 @@ function renderMachines(machinesArray) { } let found = false; + let displayedAgents = false; + + const maskedURL = NETDATA.registry.MASKED_DATA; if (machinesArray) { saveLocalStorage("registryCallback", JSON.stringify(machinesArray)); @@ -558,21 +576,34 @@ function renderMachines(machinesArray) { return naturalSortCompare(a.name, b.name); }); - for (const machine of machines) { + for (var machine of machines) { found = true; + if (myNetdataMenuFilterValue !== "") { + if (!machine.name.includes(myNetdataMenuFilterValue)) { + continue; + } + } + + displayedAgents = true; + const alternateUrlItems = ( `` @@ -581,8 +612,8 @@ function renderMachines(machinesArray) { html += ( `
- - ${machine.name} + + ${machine.name} @@ -591,25 +622,34 @@ function renderMachines(machinesArray) { ${alternateUrlItems}` ) } + + if (found && (!displayedAgents)) { + html += ( + `
+ + no nodes match the filter value. +
` + ) + } } if (!found) { if (machines) { html += ( `
` ) } else { html += ( `` ) } html += `
`; - html += `
Demo netdata agents
`; + html += `
Demo netdata nodes
`; const demoServers = [ {url: "//london.netdata.rocks/default.html", title: "UK - London (DigitalOcean.com)"}, @@ -623,10 +663,10 @@ function renderMachines(machinesArray) { ] - for (const server of demoServers) { + for (var server of demoServers) { html += ( ` @@ -638,36 +678,111 @@ function renderMachines(machinesArray) { return html; } -// Populates the my-netdata menu. -function netdataRegistryCallback(machinesArray) { +function setMyNetdataMenu(html) { + const el = document.getElementById('my-netdata-dropdown-content') + el.innerHTML = html; +} + +function clearMyNetdataMenu() { + setMyNetdataMenu(`
+ + Loading, please wait... +
+
`); +} + +function errorMyNetdataMenu() { + setMyNetdataMenu(`
+ + Cannot load known netdata agents from netdata.cloud! +
+
`); +} + +function restrictMyNetdataMenu() { + setMyNetdataMenu(`
+ Please sign in to netdata.cloud to view your nodes! +
+
`); +} + +function renderMyNetdataMenu(machinesArray) { + const el = document.getElementById('my-netdata-dropdown-content'); + el.classList.add(`theme-${netdataTheme}`); + + if (!isSignedIn()) { + if (!NETDATA.registry.isRegistryEnabled()) { + restrictMyNetdataMenu(); + return; + } + } + + if (machinesArray == registryAgents) { + console.log("Rendering my-netdata menu from registry"); + } else { + console.log("Rendering my-netdata menu from netdata.cloud", machinesArray); + } + let html = ''; + if (isSignedIn()) { + html += ( + `
+ + +
+
` + ); + } + if (options.hosts.length > 1) { - html += renderStreamedHosts(options) + `
`; + html += `
${renderStreamedHosts(options)}

`; } - html += renderMachines(machinesArray); - - html += ( - `
-
- - Switch Identity -
-
-
- - What is this? -
-
` - ) + html += `
${renderMachines(machinesArray)}
`; + + if (!isSignedIn()) { + html += ( + `
+
+ + Switch Identity +
+
+
+ + What is this? +
+
` + ) + } else { + html += ( + `
+ +
+ + What is this? +
+
` + ) + } - const el = document.getElementById('my-netdata-dropdown-content') - el.classList.add(`theme-${netdataTheme}`); el.innerHTML = html; gotoServerInit(); -}; +} function isdemo() { if (this_is_demo !== null) { @@ -762,6 +877,7 @@ function gotoServerValidateUrl(id, guid, url) { } else { document.getElementById('gotoServerResponse').innerHTML += 'found it! It is at:
' + escapeUserInputHTML(url) + ''; document.location = verifyURL(finalURL); + $('#gotoServerModal').modal('hide'); } } } else { @@ -800,24 +916,29 @@ function gotoServerModalHandler(guid) { gotoServerValidateUrl(count++, guid, url); } - setTimeout(function () { - if (gotoServerStop === false) { - document.getElementById('gotoServerResponse').innerHTML = 'Added all the known URLs for this machine.'; - NETDATA.registry.search(guid, function (data) { - // console.log(data); - len = data.urls.length; - while (len--) { - var url = data.urls[len][1]; - // console.log(url); - if (typeof checked[url] === 'undefined') { - gotoServerValidateRemaining++; - checked[url] = true; - gotoServerValidateUrl(count++, guid, url); + if (!isSignedIn()) { + // When the registry is enabled, if the user's known URLs are not working + // we consult the registry to get additional URLs. + setTimeout(function () { + if (gotoServerStop === false) { + document.getElementById('gotoServerResponse').innerHTML = 'Added all the known URLs for this machine.'; + NETDATA.registry.search(guid, function (data) { + // console.log(data); + len = data.urls.length; + while (len--) { + var url = data.urls[len][1]; + // console.log(url); + if (typeof checked[url] === 'undefined') { + gotoServerValidateRemaining++; + checked[url] = true; + gotoServerValidateUrl(count++, guid, url); + } } - } - }); - } - }, 2000); + }); + } + }, 2000); + } + return false; } @@ -858,30 +979,55 @@ function notifyForSwitchRegistry() { } } +var deleteRegistryGuid = null; var deleteRegistryUrl = null; function deleteRegistryModalHandler(guid, name, url) { - void (guid); + // void (guid); + deleteRegistryGuid = guid; deleteRegistryUrl = url; + document.getElementById('deleteRegistryServerName').innerHTML = name; document.getElementById('deleteRegistryServerName2').innerHTML = name; document.getElementById('deleteRegistryServerURL').innerHTML = url; document.getElementById('deleteRegistryResponse').innerHTML = ''; + $('#deleteRegistryModal').modal('show'); } function notifyForDeleteRegistry() { + const responseEl = document.getElementById('deleteRegistryResponse'); + if (deleteRegistryUrl) { - NETDATA.registry.delete(deleteRegistryUrl, function (result) { - if (result !== null) { - deleteRegistryUrl = null; - $('#deleteRegistryModal').modal('hide'); - NETDATA.registry.init(); - } else { - document.getElementById('deleteRegistryResponse').innerHTML = "Sorry! this command was rejected by the registry server."; - } - }); + if (isSignedIn()) { + deleteCloudAgentURL(deleteRegistryGuid, deleteRegistryUrl) + .then((count) => { + if (!count) { + responseEl.innerHTML = "Sorry, this command was rejected by netdata.cloud!"; + return; + } + NETDATA.registry.delete(deleteRegistryUrl, function (result) { + if (result === null) { + console.log("Received error from registry", result); + } + + deleteRegistryUrl = null; + $('#deleteRegistryModal').modal('hide'); + NETDATA.registry.init(); + }); + }); + } else { + NETDATA.registry.delete(deleteRegistryUrl, function (result) { + if (result !== null) { + deleteRegistryUrl = null; + $('#deleteRegistryModal').modal('hide'); + NETDATA.registry.init(); + } else { + responseEl.innerHTML = "Sorry, this command was rejected by the registry server!"; + } + }); + } } } @@ -1165,6 +1311,7 @@ function enrichChartData(chart) { case 'ap': case 'net': case 'disk': + case 'powersupply': case 'statsd': chart.menu = tmp; break; @@ -1610,9 +1757,9 @@ function renderPage(menus, data) { html += mhead + shtml + '

'; } - sidebar += '
  • add more charts
  • '; + sidebar += '
  • add more charts
  • '; sidebar += '
  • add more alarms
  • '; - sidebar += '
  • netdata on ' + data.hostname.toString() + ', collects every ' + ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ' ' + data.dimensions_count.toLocaleString() + ' metrics, presented as ' + data.charts_count.toLocaleString() + ' charts and monitored by ' + data.alarms_count.toLocaleString() + ' alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + NETDATA.seconds4human(data.update_every * data.history, {space: ' '}) + ' of real-time history.
     
    netdata
    v' + data.version.toString() + '
  • '; + sidebar += '
  • netdata on ' + data.hostname.toString() + ', collects every ' + ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ' ' + data.dimensions_count.toLocaleString() + ' metrics, presented as ' + data.charts_count.toLocaleString() + ' charts and monitored by ' + data.alarms_count.toLocaleString() + ' alarms, using ' + Math.round(data.rrd_memory_bytes / 1024 / 1024).toLocaleString() + ' MB of memory for ' + NETDATA.seconds4human(data.update_every * data.history, {space: ' '}) + ' of real-time history.
     
    netdata
    ' + data.version.toString() + '
  • '; sidebar += ''; div.innerHTML = html; document.getElementById('sidebar').innerHTML = sidebar; @@ -1696,7 +1843,7 @@ function renderChartsAndMenu(data) { // propagate the descriptive subname given to QoS // to all the other submenus with the same name - for (m in menus) { + for (var m in menus) { if (!menus.hasOwnProperty(m)) { continue; } @@ -2560,7 +2707,7 @@ function initializeDynamicDashboardWithData(data) { } // update the dashboard hostname - document.getElementById('hostname').innerHTML = options.hostname + ((netdataSnapshotData !== null) ? ' (snap)' : '').toString(); + document.getElementById('hostname').innerHTML = options.hostname + ((netdataSnapshotData !== null) ? ' (snap)' : '').toString() + '  '; document.getElementById('hostname').href = NETDATA.serverDefault; document.getElementById('netdataVersion').innerHTML = options.version; @@ -2676,89 +2823,43 @@ function versionLog(msg) { document.getElementById('versionCheckLog').innerHTML = msg; } -function getNetdataCommitIdFromVersion() { - var s = options.version.split('-'); +// New way of checking for updates, based only on versions - if (s.length !== 3) { - return null; - } - if (s[2][0] === 'g') { - var v = s[2].split('_')[0].substring(1, 8); - if (v.length === 7) { - versionLog('Installed git commit id of netdata is ' + v); - document.getElementById('netdataCommitId').innerHTML = v; - return v; - } +function versionsMatch(v1, v2) { + if (v1 == v2) { + return true; + } else { + var s1=v1.split('-'); + var s2=v2.split('-'); + if (s1.length !== s2.length) return false; + if (s1.length === 4) s1.pop(); + if (s2.length === 4) s2.pop(); + return (s1.join('-') === s2.join('-')); } - return null; } -function getNetdataCommitId(force, callback) { - versionLog('Downloading installed git commit id from netdata...'); +function getGithubLatestVersion(callback) { + versionLog('Downloading latest version id from github...'); $.ajax({ - url: 'version.txt', - async: true, - cache: false, - xhrFields: {withCredentials: true} // required for the cookie - }) - .done(function (data) { - data = data.replace(/(\r\n|\n|\r| |\t)/gm, ""); - - var c = getNetdataCommitIdFromVersion(); - if (c !== null && data.length === 40 && data.substring(0, 7) !== c) { - versionLog('Installed files commit id and internal netdata git commit id do not match'); - data = c; - } - - if (data.length >= 7) { - versionLog('Installed git commit id of netdata is ' + data); - document.getElementById('netdataCommitId').innerHTML = data.substring(0, 7); - callback(data); - } - }) - .fail(function () { - versionLog('Failed to download installed git commit id from netdata!'); - - if (force === true) { - var c = getNetdataCommitIdFromVersion(); - if (c === null) { - versionLog('Cannot find the git commit id of netdata.'); - } - callback(c); - } else { - callback(null); - } - }); -} - -function getGithubLatestCommit(callback) { - versionLog('Downloading latest git commit id info from github...'); - - $.ajax({ - url: 'https://api.github.com/repos/netdata/netdata/commits', + url: 'https://api.github.com/repositories/10744183/contents/packaging/version?ref=master', async: true, cache: false }) .done(function (data) { - versionLog('Latest git commit id from github is ' + data[0].sha); - callback(data[0].sha); + data = atob(data.content.replace(/(\r\n|\n|\r| |\t)/gm, "")); + versionLog('Latest version from github is ' + data); + callback(data); }) .fail(function () { - versionLog('Failed to download installed git commit id from github!'); + versionLog('Failed to download the latest 
version id from github!'); callback(null); }); } -function checkForUpdate(force, callback) { - getNetdataCommitId(force, function (sha1) { - if (sha1 === null) { - callback(null, null); - } - - getGithubLatestCommit(function (sha2) { - callback(sha1, sha2); - }); +function checkForUpdateByVersion(force, callback) { + getGithubLatestVersion(function (sha2) { + callback(options.version, sha2); }); return null; @@ -2784,23 +2885,22 @@ function notifyForUpdate(force) { } } - checkForUpdate(force, function (sha1, sha2) { + checkForUpdateByVersion(force, function (sha1, sha2) { var save = false; if (sha1 === null) { save = false; - versionLog('

    Failed to get your netdata git commit id!

    You can always get the latest netdata from its github page.

    '); + versionLog('

    Failed to get your netdata version!

    You can always get the latest netdata from its github page.

    '); } else if (sha2 === null) { save = false; - versionLog('

    Failed to get the latest git commit id from github.

    You can always get the latest netdata from its github page.

    '); - } else if (sha1 === sha2) { + versionLog('

    Failed to get the latest netdata version from github.

    You can always get the latest netdata from its github page.

    '); + } else if (versionsMatch(sha1, sha2)) { save = true; versionLog('

    You already have the latest netdata!

    No update yet?
    Probably, we need some motivation to keep going on!

    If you haven\'t already, give netdata a at its github page.

    '); } else { save = true; - var compare = 'https://github.com/netdata/netdata/compare/' + sha1.toString() + '...' + sha2.toString(); - - versionLog('

    New version of netdata available!

    Latest commit: ' + sha2.substring(0, 7).toString() + '

    Click here for the changes log since your installed version, and
    click here for directions on updating your netdata installation.

    We suggest to review the changes log for new features you may be interested, or important bug fixes you may need.
    Keeping your netdata updated, is generally a good idea.

    '); + var compare = 'https://docs.netdata.cloud/changelog/'; + versionLog('

    New version of netdata available!

    Latest version: ' + sha2 + '

    Click here for the changes log and
    click here for directions on updating your netdata installation.

    We suggest to review the changes log for new features you may be interested, or important bug fixes you may need.
    Keeping your netdata updated is generally a good idea.

    '); document.getElementById('update_badge').innerHTML = '!'; } @@ -3968,6 +4068,7 @@ function runOnceOnDashboardWithjQuery() { }) .on('shown.bs.dropdown', function () { Ps.update(document.getElementById('my-netdata-dropdown-content')); + myNetdataMenuDidShow(); }) .on('hidden.bs.dropdown', function () { NETDATA.unpause(); @@ -4106,7 +4207,7 @@ function runOnceOnDashboardWithjQuery() { } } } - + if (inTag && content[i] === '>') { inTag = false; } @@ -4209,7 +4310,7 @@ function finalizePage() { })(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga'); ga('create', 'UA-64295674-3', 'auto'); - ga('send', 'pageview'); + ga('send', 'pageview', '/demosite/' + window.location.host); }, 2000); } else { notifyForUpdate(); @@ -4341,3 +4442,523 @@ var selected_server_timezone = function (timezone, status) { // var netdataStarted = performance.now(); var netdataCallback = initializeDynamicDashboard; + +// ================================================================================================= +// netdata.cloud + +let registryAgents = []; + +let cloudAgents = []; + +let myNetdataMenuFilterValue = ""; + +let cloudAccountID = null; + +let cloudAccountName = null; + +let cloudToken = null; + +/// Enforces a maximum string length while retaining the prefix and the postfix of +/// the string. +function truncateString(str, maxLength) { + if (str.length <= maxLength) { + return str; + } + + const spanLength = Math.floor((maxLength - 3) / 2); + return `${str.substring(0, spanLength)}...${str.substring(str.length - spanLength)}`; +} + +// ------------------------------------------------------------------------------------------------- +// netdata.cloud API Client +// ------------------------------------------------------------------------------------------------- + +function isValidAgent(a) { + return a.urls != null && a.urls.length > 0; +} + +// https://github.com/netdata/hub/issues/146 +function getCloudAccountAgents() { + if (!isSignedIn()) { + return []; + } + + return fetch( + `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents`, + { + method: "GET", + mode: "cors", + headers: { + "Authorization": `Bearer ${cloudToken}` + } + } + ).then((response) => { + if (!response.ok) { + throw Error("Cannot fetch known accounts"); + } + return response.json(); + }).then((payload) => { + const agents = payload.result ? 
payload.result.agents : null; + + if (!agents) { + return []; + } + + return agents.filter((a) => isValidAgent(a)).map((a) => { + return { + "guid": a.id, + "name": a.name, + "url": a.urls[0], + "alternate_urls": a.urls + } + }) + }).catch(function (error) { + console.log(error); + return null; + }); +} + +// https://github.com/netdata/hub/issues/128 +function postCloudAccountAgents(agentsToSync) { + if (!isSignedIn()) { + return []; + } + + const maskedURL = NETDATA.registry.MASKED_DATA; + + const agents = agentsToSync.map((a) => { + const urls = a.alternate_urls.filter((url) => url != maskedURL); + + return { + "id": a.guid, + "name": a.name, + "urls": urls + } + }).filter((a) => isValidAgent(a)) + + const payload = { + "accountID": cloudAccountID, + "agents": agents, + "merge": false, + }; + + return fetch( + `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents`, + { + method: "POST", + mode: "cors", + headers: { + "Content-Type": "application/json; charset=utf-8", + "Authorization": `Bearer ${cloudToken}` + }, + body: JSON.stringify(payload) + } + ).then((response) => { + return response.json(); + }).then((payload) => { + const agents = payload.result ? payload.result.agents : null; + + if (!agents) { + return []; + } + + return agents.filter((a) => isValidAgent(a)).map((a) => { + return { + "guid": a.id, + "name": a.name, + "url": a.urls[0], + "alternate_urls": a.urls + } + }) + }); +} + +function deleteCloudAgentURL(agentID, url) { + if (!isSignedIn()) { + return []; + } + + return fetch( + `${NETDATA.registry.cloudBaseURL}/api/v1/accounts/${cloudAccountID}/agents/${agentID}/url?value=${encodeURIComponent(url)}`, + { + method: "DELETE", + mode: "cors", + headers: { + "Content-Type": "application/json; charset=utf-8", + "Authorization": `Bearer ${cloudToken}` + }, + } + ).then((response) => { + return response.json(); + }).then((payload) => { + const count = payload.result ? payload.result.count : 0; + return count; + }); +} + +// ------------------------------------------------------------------------------------------------- + +function signInDidClick(e) { + e.preventDefault(); + e.stopPropagation(); + + if (!NETDATA.registry.isUsingGlobalRegistry()) { + // If user is using a private registry, request his consent for + // synchronizing with cloud. 
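+ // (Consent is requested because, once signed in, syncAgents() will upload
+ // the private registry's agent list to the netdata.cloud account.)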
+ showSignInModal(); + return; + } + + signIn(); +} + +function signOutDidClick(e) { + e.preventDefault(); + e.stopPropagation(); + signOut(); +} + +// ------------------------------------------------------------------------------------------------- + +function updateMyNetdataAfterFilterChange() { + const machinesEl = document.getElementById("my-netdata-menu-machines") + machinesEl.innerHTML = renderMachines(cloudAgents); + + if (options.hosts.length > 1) { + const streamedEl = document.getElementById("my-netdata-menu-streamed") + streamedEl.innerHTML = renderStreamedHosts(options); + } +} + +function myNetdataMenuDidShow() { + const filterEl = document.getElementById("my-netdata-menu-filter-input"); + if (filterEl) { + filterEl.focus(); + } +} + +function myNetdataFilterDidChange(e) { + const inputEl = e.target; + setTimeout(() => { + myNetdataMenuFilterValue = inputEl.value; + updateMyNetdataAfterFilterChange(); + }, 1); +} + +function myNetdataFilterClearDidClick(e) { + e.preventDefault(); + e.stopPropagation(); + + const inputEl = document.getElementById("my-netdata-menu-filter-input"); + inputEl.value = ""; + myNetdataMenuFilterValue = ""; + + updateMyNetdataAfterFilterChange(); + + inputEl.focus(); +} + +// ------------------------------------------------------------------------------------------------- + +function clearCloudVariables() { + cloudAccountID = null; + cloudAccountName = null; + cloudToken = null; +} + +function clearCloudLocalStorageItems() { + localStorage.removeItem("cloud.baseURL"); + localStorage.removeItem("cloud.agentID"); + localStorage.removeItem("cloud.sync"); +} + +function signIn() { + const url = `${NETDATA.registry.cloudBaseURL}/account/sign-in-agent?origin=${encodeURIComponent(window.location.origin + "/")}`; + window.open(url); +} + +function signOut() { + cloudSSOSignOut(); +} + +function renderAccountUI() { + if (!NETDATA.registry.isCloudEnabled) { + return + } + + const container = document.getElementById("account-menu-container"); + if (isSignedIn()) { + container.removeAttribute("title"); + container.removeAttribute("data-original-title"); + container.removeAttribute("data-placement"); + container.innerHTML = ( + ` + ` + ) + document.getElementById("amc-account-name").textContent = cloudAccountName; // Anti-XSS + } else { + container.setAttribute("data-original-title", "sign in"); + container.setAttribute("data-placement", "bottom"); + container.innerHTML = ( + ` +   + ` + ) + } +} + +function handleMessage(e) { + switch (e.data.type) { + case "sign-in": + handleSignInMessage(e); + break; + + case "sign-out": + handleSignOutMessage(e); + break; + + default: + return; + } +} + +function handleSignInMessage(e) { + localStorage.setItem("cloud.baseURL", NETDATA.registry.cloudBaseURL); + + cloudAccountID = e.data.accountID; + cloudAccountName = e.data.accountName; + cloudToken = e.data.token; + + netdataRegistryCallback(registryAgents); +} + +function handleSignOutMessage(e) { + clearCloudVariables(); + renderAccountUI(); + renderMyNetdataMenu(registryAgents); +} + +function isSignedIn() { + return cloudToken != null && cloudAccountID != null; +} + +function sortedArraysEqual(a, b) { + if (a.length != b.length) return false; + + for (var i = 0; i < a.length; ++i) { + if (a[i] !== b[i]) return false; + } + + return true; +} + +// If merging is needed returns the merged agents set, otherwise returns null. 
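+// A minimal illustration with hypothetical agents (example values only, not
+// real registry data):
+//
+//   const cloud = [{guid: "g1", name: "a", url: "http://a:19999",
+//                   alternate_urls: ["http://a:19999"]}];
+//   const local = [{guid: "g1", name: "a", url: "http://a:19999",
+//                   alternate_urls: ["http://a:19999", "http://10.0.0.5:19999"]},
+//                  {guid: "g2", name: "b", url: "http://b:19999",
+//                   alternate_urls: ["http://b:19999"]}];
+//
+//   mergeAgents(cloud, local) returns both agents, with "http://10.0.0.5:19999"
+//   appended to g1's alternate_urls. It returns null (nothing to sync) only
+//   when the cloud set already covers every local agent and URL; masked
+//   registry URLs (NETDATA.registry.MASKED_DATA) are skipped during the merge.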
+function mergeAgents(cloud, local) { + let dirty = false; + + const union = new Map(); + + for (const cagent of cloud) { + union.set(cagent.guid, cagent); + } + + for (const lagent of local) { + const cagent = union.get(lagent.guid); + if (cagent) { + for (const u of lagent.alternate_urls) { + if (u === NETDATA.registry.MASKED_DATA) { // TODO: temp until registry is updated. + continue; + } + + if (!cagent.alternate_urls.includes(u)) { + dirty = true; + cagent.alternate_urls.push(u); + } + } + } else { + dirty = true; + union.set(lagent.guid, lagent); + } + } + + if (dirty) { + return Array.from(union.values()); + } + + return null; +} + +function showSignInModal() { + document.getElementById("sim-registry").innerHTML = NETDATA.registry.server; + $("#signInModal").modal("show"); +} + +function explicitlySignIn() { + $("#signInModal").modal("hide"); + signIn(); +} + +function showSyncModal() { + document.getElementById("sync-registry-modal-registry").innerHTML = NETDATA.registry.server; + $("#syncRegistryModal").modal("show"); +} + +function explicitlySyncAgents() { + $("#syncRegistryModal").modal("hide"); + + const json = localStorage.getItem("cloud.sync"); + const sync = json ? JSON.parse(json): {}; + delete sync[cloudAccountID]; + localStorage.setItem("cloud.sync", JSON.stringify(sync)); + + NETDATA.registry.init(); +} + +function syncAgents(callback) { + const json = localStorage.getItem("cloud.sync"); + const sync = json ? JSON.parse(json): {}; + + const currentAgent = { + guid: NETDATA.registry.machine_guid, + name: NETDATA.registry.hostname, + url: NETDATA.serverDefault, + alternate_urls: [NETDATA.serverDefault], + } + + const localAgents = sync[cloudAccountID] + ? [currentAgent] + : registryAgents.concat([currentAgent]); + + console.log("Checking if sync is needed.", localAgents); + + const agentsToSync = mergeAgents(cloudAgents, localAgents); + + if ((!sync[cloudAccountID]) || agentsToSync) { + sync[cloudAccountID] = new Date().getTime(); + localStorage.setItem("cloud.sync", JSON.stringify(sync)); + } + + if (agentsToSync) { + console.log("Synchronizing with netdata.cloud."); + + postCloudAccountAgents(agentsToSync).then((agents) => { + // TODO: clear syncTime on error! + cloudAgents = agents; + callback(cloudAgents); + }); + + return + } + + callback(cloudAgents); +} + +let isCloudSSOInitialized = false; + +function cloudSSOInit() { + const iframeEl = document.getElementById("ssoifrm"); + const url = `${NETDATA.registry.cloudBaseURL}/account/sso-agent?id=${NETDATA.registry.machine_guid}`; + iframeEl.src = url; + isCloudSSOInitialized = true; +} + +function cloudSSOSignOut() { + const iframe = document.getElementById("ssoifrm"); + const url = `${NETDATA.registry.cloudBaseURL}/account/sign-out-agent`; + iframe.src = url; +} + +function initCloud() { + if (!NETDATA.registry.isCloudEnabled) { + clearCloudVariables(); + clearCloudLocalStorageItems(); + return; + } + + if (NETDATA.registry.cloudBaseURL != localStorage.getItem("cloud.baseURL")) { + clearCloudVariables(); + clearCloudLocalStorageItems(); + if (NETDATA.registry.cloudBaseURL) { + localStorage.setItem("cloud.baseURL", NETDATA.registry.cloudBaseURL); + } + } + + if (!isCloudSSOInitialized) { + cloudSSOInit(); + } + + renderAccountUI(); +} + +// This callback is called after NETDATA.registry is initialized. 
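+// When signed in, the flow is: getCloudAccountAgents() fetches the account's
+// agents from netdata.cloud, syncAgents() merges them with the local registry
+// set (POSTing the merged list back if anything changed), and the result
+// replaces NETDATA.registry.machines before renderMyNetdataMenu() runs.
+// When signed out, the registry's machinesArray is rendered directly.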
+function netdataRegistryCallback(machinesArray) { + localStorage.setItem("cloud.agentID", NETDATA.registry.machine_guid); + + initCloud(); + + registryAgents = machinesArray; + + if (isSignedIn()) { + // We call getCloudAccountAgents() here because it requires that + // NETDATA.registry is initialized. + clearMyNetdataMenu(); + getCloudAccountAgents().then((agents) => { + if (!agents) { + errorMyNetdataMenu(); + return; + } + cloudAgents = agents; + syncAgents((agents) => { + const agentsMap = {} + for (const agent of agents) { + agentsMap[agent.guid] = agent; + } + + NETDATA.registry.machines = agentsMap; + NETDATA.registry.machines_array = agents; + + renderMyNetdataMenu(agents); + }); + }); + } else { + renderMyNetdataMenu(machinesArray) + } +}; + +// If we know the cloudBaseURL and agentID from local storage render (eagerly) +// the account ui before receiving the definitive response from the web server. +// This improves the perceived performance. +function tryFastInitCloud() { + const baseURL = localStorage.getItem("cloud.baseURL"); + const agentID = localStorage.getItem("cloud.agentID"); + + if (baseURL && agentID) { + NETDATA.registry.cloudBaseURL = baseURL; + NETDATA.registry.machine_guid = agentID; + NETDATA.registry.isCloudEnabled = true; + + initCloud(); + } +} + +function initializeApp() { + window.addEventListener("message", handleMessage, false); + +// tryFastInitCloud(); +} + +if (document.readyState === "complete") { + initializeApp(); +} else { + document.addEventListener("readystatechange", () => { + if (document.readyState === "complete") { + initializeApp(); + } + }) +} diff --git a/web/gui/manifest.json b/web/gui/manifest.json new file mode 100644 index 000000000..52cb4831c --- /dev/null +++ b/web/gui/manifest.json @@ -0,0 +1,41 @@ +{ + "name": "App", + "icons": [ + { + "src": "images\/android-icon-36x36.png", + "sizes": "36x36", + "type": "image\/png", + "density": "0.75" + }, + { + "src": "images\/android-icon-48x48.png", + "sizes": "48x48", + "type": "image\/png", + "density": "1.0" + }, + { + "src": "images\/android-icon-72x72.png", + "sizes": "72x72", + "type": "image\/png", + "density": "1.5" + }, + { + "src": "images\/android-icon-96x96.png", + "sizes": "96x96", + "type": "image\/png", + "density": "2.0" + }, + { + "src": "images\/android-icon-144x144.png", + "sizes": "144x144", + "type": "image\/png", + "density": "3.0" + }, + { + "src": "images\/android-icon-192x192.png", + "sizes": "192x192", + "type": "image\/png", + "density": "4.0" + } + ] +} diff --git a/web/gui/src/dashboard.js/charting/_c3.js b/web/gui/src/dashboard.js/charting/_c3.js new file mode 100644 index 000000000..6688bbcce --- /dev/null +++ b/web/gui/src/dashboard.js/charting/_c3.js @@ -0,0 +1,114 @@ + +// DEPRECATED: will be removed! 
+ +// c3 + +NETDATA.c3Initialize = function(callback) { + if (typeof netdataNoC3 === 'undefined' || !netdataNoC3) { + + // C3 requires D3 + if (!NETDATA.chartLibraries.d3.initialized) { + if (NETDATA.chartLibraries.d3.enabled) { + NETDATA.d3Initialize(function() { + NETDATA.c3Initialize(callback); + }); + } else { + NETDATA.chartLibraries.c3.enabled = false; + if (typeof callback === "function") + return callback(); + } + } else { + NETDATA._loadCSS(NETDATA.c3_css); + + $.ajax({ + url: NETDATA.c3_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('c3', NETDATA.c3_js); + }) + .fail(function() { + NETDATA.chartLibraries.c3.enabled = false; + NETDATA.error(100, NETDATA.c3_js); + }) + .always(function() { + if (typeof callback === "function") + return callback(); + }); + } + } else { + NETDATA.chartLibraries.c3.enabled = false; + if (typeof callback === "function") + return callback(); + } +}; + +NETDATA.c3ChartUpdate = function(state, data) { + state.c3_instance.destroy(); + return NETDATA.c3ChartCreate(state, data); + + //state.c3_instance.load({ + // rows: data.result, + // unload: true + //}); + + //return true; +}; + +NETDATA.c3ChartCreate = function(state, data) { + + state.element_chart.id = 'c3-' + state.uuid; + // console.log('id = ' + state.element_chart.id); + + state.c3_instance = c3.generate({ + bindto: '#' + state.element_chart.id, + size: { + width: state.chartWidth(), + height: state.chartHeight() + }, + color: { + pattern: state.chartColors() + }, + data: { + x: 'time', + rows: data.result, + type: (state.chart.chart_type === 'line')?'spline':'area-spline' + }, + axis: { + x: { + type: 'timeseries', + tick: { + format: function(x) { + return NETDATA.dateTime.xAxisTimeString(x); + } + } + } + }, + grid: { + x: { + show: true + }, + y: { + show: true + } + }, + point: { + show: false + }, + line: { + connectNull: false + }, + transition: { + duration: 0 + }, + interaction: { + enabled: true + } + }); + + // console.log(state.c3_instance); + + return true; +}; diff --git a/web/gui/src/dashboard.js/charting/_morris.js b/web/gui/src/dashboard.js/charting/_morris.js new file mode 100644 index 000000000..30789e4e2 --- /dev/null +++ b/web/gui/src/dashboard.js/charting/_morris.js @@ -0,0 +1,81 @@ + +// DEPRECATED: will be removed! 
+ +// morris + +NETDATA.morrisInitialize = function(callback) { + if (typeof netdataNoMorris === 'undefined' || !netdataNoMorris) { + + // morris requires raphael + if (!NETDATA.chartLibraries.raphael.initialized) { + if (NETDATA.chartLibraries.raphael.enabled) { + NETDATA.raphaelInitialize(function() { + NETDATA.morrisInitialize(callback); + }); + } else { + NETDATA.chartLibraries.morris.enabled = false; + if (typeof callback === "function") + return callback(); + } + } else { + NETDATA._loadCSS(NETDATA.morris_css); + + $.ajax({ + url: NETDATA.morris_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('morris', NETDATA.morris_js); + }) + .fail(function() { + NETDATA.chartLibraries.morris.enabled = false; + NETDATA.error(100, NETDATA.morris_js); + }) + .always(function() { + if (typeof callback === "function") + return callback(); + }); + } + } else { + NETDATA.chartLibraries.morris.enabled = false; + if (typeof callback === "function") + return callback(); + } +}; + +NETDATA.morrisChartUpdate = function(state, data) { + state.morris_instance.setData(data.result.data); + return true; +}; + +NETDATA.morrisChartCreate = function(state, data) { + + state.morris_options = { + element: state.element_chart.id, + data: data.result.data, + xkey: 'time', + ykeys: data.dimension_names, + labels: data.dimension_names, + lineWidth: 2, + pointSize: 3, + smooth: true, + hideHover: 'auto', + parseTime: true, + continuousLine: false, + behaveLikeLine: false + }; + + if (state.chart.chart_type === 'line') + state.morris_instance = new Morris.Line(state.morris_options); + + else if (state.chart.chart_type === 'area') { + state.morris_options.behaveLikeLine = true; + state.morris_instance = new Morris.Area(state.morris_options); + } + else // stacked + state.morris_instance = new Morris.Area(state.morris_options); + + return true; +}; diff --git a/web/gui/src/dashboard.js/charting/_raphael.js b/web/gui/src/dashboard.js/charting/_raphael.js new file mode 100644 index 000000000..2d89a22a8 --- /dev/null +++ b/web/gui/src/dashboard.js/charting/_raphael.js @@ -0,0 +1,48 @@ + +// DEPRECATED: will be removed! 
+ +// raphael + +NETDATA.raphaelInitialize = function(callback) { + if (typeof netdataStopRaphael === 'undefined' || !netdataStopRaphael) { + $.ajax({ + url: NETDATA.raphael_js, + cache: true, + dataType: "script", + xhrFields: { withCredentials: true } // required for the cookie + }) + .done(function() { + NETDATA.registerChartLibrary('raphael', NETDATA.raphael_js); + }) + .fail(function() { + NETDATA.chartLibraries.raphael.enabled = false; + NETDATA.error(100, NETDATA.raphael_js); + }) + .always(function() { + if (typeof callback === "function") + return callback(); + }); + } else { + NETDATA.chartLibraries.raphael.enabled = false; + if (typeof callback === "function") + return callback(); + } +}; + +NETDATA.raphaelChartUpdate = function(state, data) { + $(state.element_chart).raphael(data.result, { + width: state.chartWidth(), + height: state.chartHeight() + }); + + return false; +}; + +NETDATA.raphaelChartCreate = function(state, data) { + $(state.element_chart).raphael(data.result, { + width: state.chartWidth(), + height: state.chartHeight() + }); + + return false; +}; diff --git a/web/gui/src/dashboard.js/charting/dygraph.js b/web/gui/src/dashboard.js/charting/dygraph.js index 62cb466fc..a60af18b8 100644 --- a/web/gui/src/dashboard.js/charting/dygraph.js +++ b/web/gui/src/dashboard.js/charting/dygraph.js @@ -1,5 +1,9 @@ // dygraph +// Codacy declarations +/* global smoothPlotter */ +/* global Dygraph */ + NETDATA.dygraph = { smooth: false }; diff --git a/web/gui/src/dashboard.js/common.js b/web/gui/src/dashboard.js/common.js index aa9d4bac3..4a97babea 100644 --- a/web/gui/src/dashboard.js/common.js +++ b/web/gui/src/dashboard.js/common.js @@ -56,7 +56,7 @@ NETDATA.commonMin = { // for (let i in t) { // if (t.hasOwnProperty(i) && t[i] < m) m = t[i]; // } - for (const ti of Object.values(t)) { + for (var ti of Object.values(t)) { if (ti < m) { m = ti; } @@ -120,7 +120,7 @@ NETDATA.commonMax = { // for (let i in t) { // if (t.hasOwnProperty(i) && t[i] > m) m = t[i]; // } - for (const ti of Object.values(t)) { + for (var ti of Object.values(t)) { if (ti > m) { m = ti; } diff --git a/web/gui/src/dashboard.js/main.js b/web/gui/src/dashboard.js/main.js index 3d8cc3b7c..1d050d613 100644 --- a/web/gui/src/dashboard.js/main.js +++ b/web/gui/src/dashboard.js/main.js @@ -1,6 +1,9 @@ // *** src/dashboard.js/main.js +// Codacy declarations +/* global clipboard */ + if (NETDATA.options.debug.main_loop) { console.log('welcome to NETDATA'); } diff --git a/web/gui/src/dashboard.js/options.js b/web/gui/src/dashboard.js/options.js index 653740a8d..68132e7b2 100644 --- a/web/gui/src/dashboard.js/options.js +++ b/web/gui/src/dashboard.js/options.js @@ -18,7 +18,7 @@ if (typeof netdataIcons === 'object') { // if (NETDATA.icons.hasOwnProperty(icon) && typeof(netdataIcons[icon]) === 'string') // NETDATA.icons[icon] = netdataIcons[icon]; // } - for (const icon of Object.keys(NETDATA.icons)) { + for (var icon of Object.keys(NETDATA.icons)) { if (typeof(netdataIcons[icon]) === 'string') { NETDATA.icons[icon] = netdataIcons[icon] } @@ -38,7 +38,7 @@ if (typeof netdataShowAlarms === 'undefined') { } if (typeof netdataRegistryAfterMs !== 'number' || netdataRegistryAfterMs < 0) { - netdataRegistryAfterMs = 1500; + netdataRegistryAfterMs = 0; // 1500; } if (typeof netdataRegistry === 'undefined') { diff --git a/web/gui/src/dashboard.js/prologue.js.inc b/web/gui/src/dashboard.js/prologue.js.inc index ae9201bc7..afa1f0e05 100644 --- a/web/gui/src/dashboard.js/prologue.js.inc +++ 
b/web/gui/src/dashboard.js/prologue.js.inc @@ -77,7 +77,8 @@ // ---------------------------------------------------------------------------- // global namespace -const NETDATA = window.NETDATA || {}; +// Should stay var! +var NETDATA = window.NETDATA || {}; (function(window, document, $, undefined) { diff --git a/web/gui/src/dashboard.js/registry.js b/web/gui/src/dashboard.js/registry.js index b9d91291a..77a822b7b 100644 --- a/web/gui/src/dashboard.js/registry.js +++ b/web/gui/src/dashboard.js/registry.js @@ -3,6 +3,8 @@ NETDATA.registry = { server: null, // the netdata registry server + isCloudEnabled: false,// is netdata.cloud functionality enabled? + cloudBaseURL: null, // the netdata cloud base url person_guid: null, // the unique ID of this browser / user machine_guid: null, // the unique ID the netdata server that served dashboard.js hostname: 'unknown', // the hostname of the netdata server that served dashboard.js @@ -10,8 +12,17 @@ NETDATA.registry = { machines_array: null, // the user's other URLs in an array person_urls: null, + MASKED_DATA: "***", + + isUsingGlobalRegistry: function() { + return NETDATA.registry.server == "https://registry.my-netdata.io"; + }, + + isRegistryEnabled: function() { + return !(NETDATA.registry.isUsingGlobalRegistry() || isSignedIn()) + }, + parsePersonUrls: function (person_urls) { - // console.log(person_urls); NETDATA.registry.person_urls = person_urls; if (person_urls) { @@ -64,13 +75,21 @@ NETDATA.registry = { NETDATA.registry.hello(NETDATA.serverDefault, function (data) { if (data) { NETDATA.registry.server = data.registry; + if (data.cloud_base_url != "") { + NETDATA.registry.isCloudEnabled = true; + NETDATA.registry.cloudBaseURL = data.cloud_base_url; + } else { + NETDATA.registry.isCloudEnabled = false; + NETDATA.registry.cloudBaseURL = ""; + } NETDATA.registry.machine_guid = data.machine_guid; NETDATA.registry.hostname = data.hostname; - + if (dataLayer) { + if (data.anonymous_statistics) dataLayer.push({"anonymous_statistics" : "true", "machine_guid" : data.machine_guid}); + } NETDATA.registry.access(2, function (person_urls) { NETDATA.registry.parsePersonUrls(person_urls); - - }); + }); } }); }, @@ -113,13 +132,25 @@ NETDATA.registry = { }, access: function (max_redirects, callback) { + let name = NETDATA.registry.MASKED_DATA; + let url = NETDATA.registry.MASKED_DATA; + + if (!NETDATA.registry.isUsingGlobalRegistry()) { + // If the user is using a private registry keep sending identifiable + // data. + name = NETDATA.registry.hostname; + url = NETDATA.serverDefault; + } + + console.log("ACCESS", name, url); + // send ACCESS to a netdata registry: // 1. it lets it know we are accessing a netdata server (its machine GUID and its URL) // 2. 
it responds with a list of netdata servers we know // the registry identifies us using a cookie it sets the first time we access it // the registry may respond with a redirect URL to send us to another registry $.ajax({ - url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(NETDATA.registry.hostname) + '&url=' + encodeURIComponent(NETDATA.serverDefault), // + '&visible_url=' + encodeURIComponent(document.location), + url: NETDATA.registry.server + '/api/v1/registry?action=access&machine=' + NETDATA.registry.machine_guid + '&name=' + encodeURIComponent(name) + '&url=' + encodeURIComponent(url), // + '&visible_url=' + encodeURIComponent(document.location), async: true, cache: false, headers: { @@ -151,14 +182,14 @@ NETDATA.registry = { return callback(null); } } - } - else { + } else { if (typeof data.person_guid === 'string') { NETDATA.registry.person_guid = data.person_guid; } if (typeof callback === 'function') { - return callback(data.urls); + const urls = data.urls.filter((u) => u[1] !== NETDATA.registry.MASKED_DATA); + return callback(urls); } } }) diff --git a/web/gui/src/dashboard.js/themes.js b/web/gui/src/dashboard.js/themes.js index a83a1dd38..aafe15768 100644 --- a/web/gui/src/dashboard.js/themes.js +++ b/web/gui/src/dashboard.js/themes.js @@ -1,3 +1,5 @@ +// Codacy declarations +/* global netdataTheme */ NETDATA.themes = { white: { diff --git a/web/gui/src/dashboard.js/units-conversion.js b/web/gui/src/dashboard.js/units-conversion.js index e4eba57f1..26b840344 100644 --- a/web/gui/src/dashboard.js/units-conversion.js +++ b/web/gui/src/dashboard.js/units-conversion.js @@ -39,6 +39,21 @@ NETDATA.unitsConversion = { 'GB/s': 1024 * 1024, 'TB/s': 1024 * 1024 * 1024 }, + 'KiB/s': { + 'B/s': 1 / 1024, + 'KiB/s': 1, + 'MiB/s': 1024, + 'GiB/s': 1024 * 1024, + 'TiB/s': 1024 * 1024 * 1024 + }, + 'B': { + 'B': 1, + 'KiB': 1024, + 'MiB': 1024 * 1024, + 'GiB': 1024 * 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 * 1024, + 'PiB': 1024 * 1024 * 1024 * 1024 * 1024 + }, 'KB': { 'B': 1 / 1024, 'KB': 1, @@ -46,6 +61,13 @@ NETDATA.unitsConversion = { 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024 }, + 'KiB': { + 'B': 1 / 1024, + 'KiB': 1, + 'MiB': 1024, + 'GiB': 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 + }, 'MB': { 'B': 1 / (1024 * 1024), 'KB': 1 / 1024, @@ -54,6 +76,14 @@ NETDATA.unitsConversion = { 'TB': 1024 * 1024, 'PB': 1024 * 1024 * 1024 }, + 'MiB': { + 'B': 1 / (1024 * 1024), + 'KiB': 1 / 1024, + 'MiB': 1, + 'GiB': 1024, + 'TiB': 1024 * 1024, + 'PiB': 1024 * 1024 * 1024 + }, 'GB': { 'B': 1 / (1024 * 1024 * 1024), 'KB': 1 / (1024 * 1024), @@ -62,6 +92,15 @@ NETDATA.unitsConversion = { 'TB': 1024, 'PB': 1024 * 1024, 'EB': 1024 * 1024 * 1024 + }, + 'GiB': { + 'B': 1 / (1024 * 1024 * 1024), + 'KiB': 1 / (1024 * 1024), + 'MiB': 1 / 1024, + 'GiB': 1, + 'TiB': 1024, + 'PiB': 1024 * 1024, + 'EiB': 1024 * 1024 * 1024 } /* 'milliseconds': { @@ -261,7 +300,7 @@ NETDATA.unitsConversion = { // } // } const sunit = this.scalableUnits[units]; - for (const x of Object.keys(sunit)) { + for (var x of Object.keys(sunit)) { let m = sunit[x]; if (m <= max && m > tdivider) { tunits = x; @@ -297,7 +336,7 @@ NETDATA.unitsConversion = { // find the max divider of all charts let common_units = t[uuid]; - for (const x in t) { + for (var x in t) { if (t.hasOwnProperty(x) && t[x].divider > common_units.divider) { common_units = t[x]; } @@ -364,7 +403,7 @@ NETDATA.unitsConversion = { } else if (typeof this.convertibleUnits[units] !== 'undefined') 
{ // units that can be converted if (desired_units === 'auto') { - for (const x in this.convertibleUnits[units]) { + for (var x in this.convertibleUnits[units]) { if (this.convertibleUnits[units].hasOwnProperty(x)) { if (this.convertibleUnits[units][x].check(max)) { //console.log('DEBUG: ' + uuid.toString() + ' converting ' + units.toString() + ' to: ' + x.toString()); diff --git a/web/gui/src/dashboard.js/utils.js b/web/gui/src/dashboard.js/utils.js index 2d658dcc2..8014aaf17 100644 --- a/web/gui/src/dashboard.js/utils.js +++ b/web/gui/src/dashboard.js/utils.js @@ -75,7 +75,7 @@ NETDATA.seconds4human = function (seconds, options) { if (typeof options !== 'object') { options = defaultOptions; } else { - for (const x in defaultOptions) { + for (var x in defaultOptions) { if (typeof options[x] !== 'string') { options[x] = defaultOptions[x]; } diff --git a/web/gui/src/dashboard.js/xss.js b/web/gui/src/dashboard.js/xss.js index 3f9cd1ac7..fa66f34da 100644 --- a/web/gui/src/dashboard.js/xss.js +++ b/web/gui/src/dashboard.js/xss.js @@ -42,7 +42,7 @@ NETDATA.xss = { } else { // console.log('checking object "' + name + '"'); - for (const i in obj) { + for (var i in obj) { if (obj.hasOwnProperty(i) === false) { continue; } diff --git a/web/gui/version.txt b/web/gui/version.txt deleted file mode 100644 index 294ecdb3d..000000000 --- a/web/gui/version.txt +++ /dev/null @@ -1 +0,0 @@ -19e4b1c85e8e43788a08617af4cbacff0d8a170e diff --git a/web/server/Makefile.am b/web/server/Makefile.am index 843c4cc9b..5860a5cba 100644 --- a/web/server/Makefile.am +++ b/web/server/Makefile.am @@ -4,8 +4,6 @@ AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES = $(srcdir)/Makefile.in SUBDIRS = \ - single \ - multi \ static \ $(NULL) diff --git a/web/server/Makefile.in b/web/server/Makefile.in deleted file mode 100644 index 4b2614c5a..000000000 --- a/web/server/Makefile.in +++ /dev/null @@ -1,650 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/server -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - 
esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = 
@OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - single \ - multi \ - static \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/server/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/server/README.md b/web/server/README.md index 34ef628bc..7d74c181e 100644 --- a/web/server/README.md +++ b/web/server/README.md @@ -1,34 +1,21 @@ -# Netdata web server +# Web server -Netdata supports 3 implementations of its internal web server: - -- `static-threaded` is a web server with a fix (configured number of threads) -- `single-threaded` is a simple web server running with a single thread -- `multi-threaded` is a web server that spawns a thread for each client connection -- `none` to disable the web server - -We suggest to use the `static-threaded` one. It is the most efficient. - -All versions of the web servers use non-blocking I/O. - -All web servers respect the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. +The Netdata web server runs as `static-threaded`, i.e. with a fixed, configurable number of threads. +It uses non-blocking I/O and respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. 
## Configuration -### Selecting the web server - -You can select the web server implementation by editing `netdata.conf` and setting: +You can disable the web server by editing `netdata.conf` and setting: ``` [web] - mode = none | single-threaded | multi-threaded | static-threaded + mode = none ``` -The `static` web server supports also these settings: +With the web server enabled, you can control the number of threads and sockets with the following settings: ``` [web] - mode = static-threaded web server threads = 4 web server max sockets = 512 ``` @@ -39,28 +26,37 @@ The `web server max sockets` setting is automatically adjusted to 50% of the max ### Binding netdata to multiple ports -Netdata can bind to multiple IPs and ports. Up to 100 sockets can be used (you can increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`). +Netdata can bind to multiple IPs and ports, offering access to different services on each. Up to 100 sockets can be used (you can increase it at compile time with `CFLAGS="-DMAX_LISTEN_FDS=200" ./netdata-installer.sh ...`). The ports to bind are controlled via `[web].bind to`, like this: ``` [web] default port = 19999 - bind to = 127.0.0.1 10.1.1.1:19998 hostname:19997 [::]:19996 localhost:19995 *:http unix:/tmp/netdata.sock + bind to = 127.0.0.1=dashboard 10.1.1.1:19998=management|netdata.conf hostname:19997=badges [::]:19996=streaming localhost:19995=registry *:http=dashboard unix:/tmp/netdata.sock ``` Using the above, netdata will bind to: -- IPv4 127.0.0.1 at port 19999 (port was used from `default port`) -- IPv4 10.1.1.1 at port 19998 -- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997 -- All IPv6 IPs at port 19996 -- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending the resolved IPs) at port 19996 -- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services` -- Unix domain socket `/tmp/netdata.sock` +- IPv4 127.0.0.1 at port 19999 (port was used from `default port`). Only the UI (dashboard) and the read API will be accessible on this port. +- IPv4 10.1.1.1 at port 19998. The management API and netdata.conf will be accessible on this port. +- All the IPs `hostname` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19997. Only badges will be accessible on this port. +- All IPv6 IPs at port 19996. Only metric streaming requests from other netdata agents will be accepted on this port. +- All the IPs `localhost` resolves to (both IPv4 and IPv6 depending on the resolved IPs) at port 19995. This port will only accept registry API requests. +- All IPv4 and IPv6 IPs at port `http` as set in `/etc/services`. Only the UI (dashboard) and the read API will be accessible on this port. +- Unix domain socket `/tmp/netdata.sock`. All requests are serviceable on this socket. The option `[web].default port` is used when an entry in `[web].bind to` does not specify a port. +Note that the access permissions specified with the `=request type|request type|...` format are available from version 1.12 onwards. +As shown in the example above, these permissions are optional, with the default being to permit all request types on the specified port. +The request types are strings identical to the `allow X from` directives of the access lists, i.e. `dashboard`, `streaming`, `registry`, `netdata.conf`, `badges` and `management`.
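For instance, a common split under this scheme is to expose only the dashboard and badges publicly while keeping the management API and `netdata.conf` on localhost (the ports here are illustrative):

```
[web]
    bind to = *:19999=dashboard|badges 127.0.0.1:19998=management|netdata.conf
```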
+The access lists themselves and the general setting `allow connections from` in the next section are applied regardless of the ports that are configured to provide these services. +The API requests are serviced as follows: +- `dashboard` gives access to the UI, the read API and badges API calls. +- `badges` gives access only to the badges API calls. +- `management` gives access only to the management API calls. + ### Access lists Netdata supports access lists in `netdata.conf`: @@ -72,6 +68,7 @@ Netdata supports access lists in `netdata.conf`: allow badges from = * allow streaming from = * allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* + allow management from = localhost ``` `*` does string matches on the IPs of the clients. @@ -90,7 +87,27 @@ Netdata supports access lists in `netdata.conf`: The setting in `netdata.conf` is checked before the ones in [stream.conf](../../streaming/stream.conf). - `allow netdata.conf from` checks the IP to allow `http://netdata.host:19999/netdata.conf`. - By default it allows only private lans. + The IPs listed are all the private IPv4 addresses, including link local IPv6 addresses. Keep in mind that connections to netdata API ports are filtered by `allow connections from`. So, IPs allowed by `allow netdata.conf from` should also be allowed by `allow connections from`. + +- `allow management from` checks the IPs to allow API management calls. Management via the API is currently supported for [health](../api/health/#health-management-api). + +### Other netdata.conf [web] section options +setting | default | info +:------:|:-------:|:---- +ses max window | `15` | See [single exponential smoothing](../api/queries/ses/) +des max window | `15` | See [double exponential smoothing](../api/queries/des/) +listen backlog | `4096` | The port backlog. Check `man 2 listen`. +web files owner | `netdata` | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, netdata will only serve files owned by the user given in `run as user`. +web files group | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not. +disconnect idle clients after seconds | `60` | The time in seconds to disconnect web clients after being totally idle. +timeout for first request | `60` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks. +accept a streaming request every seconds | `0` | Can be used to set a limit on how often a master Netdata server will accept streaming requests from the slaves in a [streaming and replication setup](../../streaming) +respect do not track policy | `no` | If set to `yes`, will respect the client's browser preferences on storing cookies. +x-frame-options response header | | [Avoid clickjacking attacks by ensuring that the content is not embedded into other sites](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options). +enable gzip compression | `yes` | When set to `yes`, netdata web responses will be GZIP compressed, if the web client accepts such responses.
+gzip compression strategy | `default` | Valid strategies are `default`, `filtered`, `huffman only`, `rle` and `fixed` +gzip compression level | `3` | Valid levels are 1 (fastest) to 9 (best ratio) + ## DDoS protection @@ -101,3 +118,6 @@ If you publish your netdata to the internet, you may want to apply some protecti 3. Don't use all your cpu cores for netdata (lower `[web].web server threads`) 4. Run netdata with a low process scheduling priority (the default is the lowest) 5. If possible, proxy netdata via a full featured web server (nginx, apache, etc) + + +[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/server/multi/Makefile.am b/web/server/multi/Makefile.am deleted file mode 100644 index 90cc9ca1e..000000000 --- a/web/server/multi/Makefile.am +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in - -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) diff --git a/web/server/multi/Makefile.in b/web/server/multi/Makefile.in deleted file mode 100644 index 61ef9455f..000000000 --- a/web/server/multi/Makefile.in +++ /dev/null @@ -1,647 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) 
;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/server/multi -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ 
- esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS 
= @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/multi/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/server/multi/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/server/multi/README.md b/web/server/multi/README.md deleted file mode 100644 index f51073e93..000000000 --- a/web/server/multi/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# `multi-threaded` web server - -The `multi-threaded` web server spawns a thread for each connection it receives. - -Each thread uses non-blocking I/O, so in principle it could serve any number of web requests in parallel; -however, HTTP requires responses to be delivered in order on a connection, so in practice each thread serves its requests sequentially. - -Each thread respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. \ No newline at end of file diff --git a/web/server/multi/multi-threaded.c b/web/server/multi/multi-threaded.c deleted file mode 100644 index 37bdd38ad..000000000 --- a/web/server/multi/multi-threaded.c +++ /dev/null @@ -1,314 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#define WEB_SERVER_INTERNALS 1 -#include "multi-threaded.h" - -// -------------------------------------------------------------------------------------- - // the thread of a single client - for the MULTI-THREADED web server - -// 1. waits for input and output, using async I/O -// 2. it processes HTTP requests -// 3. it generates HTTP responses -// 4.
it copies data from input to output if mode is FILECOPY - -int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS; -int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST; -long web_client_streaming_rate_t = 0L; - -static void multi_threaded_web_client_worker_main_cleanup(void *ptr) { - struct web_client *w = ptr; - WEB_CLIENT_IS_DEAD(w); - w->running = 0; -} - -static void *multi_threaded_web_client_worker_main(void *ptr) { - netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr); - - struct web_client *w = ptr; - w->running = 1; - - struct pollfd fds[2], *ifd, *ofd; - int retval, timeout_ms; - nfds_t fdmax = 0; - - while(!netdata_exit) { - if(unlikely(web_client_check_dead(w))) { - debug(D_WEB_CLIENT, "%llu: client is dead.", w->id); - break; - } - else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) { - debug(D_WEB_CLIENT, "%llu: client is not set for neither receiving nor sending data.", w->id); - break; - } - - if(unlikely(w->ifd < 0 || w->ofd < 0)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd", w->id, w->ifd, w->ofd); - break; - } - - if(w->ifd == w->ofd) { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT; - - fds[1].fd = -1; - fds[1].events = 0; - fds[1].revents = 0; - - ifd = ofd = &fds[0]; - - fdmax = 1; - } - else { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - ifd = &fds[0]; - - fds[1].fd = w->ofd; - fds[1].events = 0; - fds[1].revents = 0; - if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT; - ofd = &fds[1]; - - fdmax = 2; - } - - debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - errno = 0; - timeout_ms = web_client_timeout * 1000; - retval = poll(fds, fdmax, timeout_ms); - - if(unlikely(netdata_exit)) break; - - if(unlikely(retval == -1)) { - if(errno == EAGAIN || errno == EINTR) { - debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id); - continue; - } - - debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd); - break; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - break; - } - - if(unlikely(netdata_exit)) break; - - int used = 0; - if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) { - used++; - if(web_client_send(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - break; - } - } - - if(unlikely(netdata_exit)) break; - - if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) { - used++; - if(web_client_receive(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. 
Closing client.", w->id); - break; - } - - if(w->mode == WEB_CLIENT_MODE_NORMAL) { - debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id); - web_client_process_request(w); - - // if the sockets are closed, may have transferred this client - // to plugins.d - if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM)) - break; - } - } - - if(unlikely(!used)) { - debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id); - break; - } - } - - if(w->mode != WEB_CLIENT_MODE_STREAM) - web_server_log_connection(w, "DISCONNECTED"); - - web_client_request_done(w); - - debug(D_WEB_CLIENT, "%llu: done...", w->id); - - // close the sockets/files now - // to free file descriptors - if(w->ifd == w->ofd) { - if(w->ifd != -1) close(w->ifd); - } - else { - if(w->ifd != -1) close(w->ifd); - if(w->ofd != -1) close(w->ofd); - } - w->ifd = -1; - w->ofd = -1; - - netdata_thread_cleanup_pop(1); - return NULL; -} - -// -------------------------------------------------------------------------------------- -// the main socket listener - MULTI-THREADED - -// 1. it accepts new incoming requests on our port -// 2. creates a new web_client for each connection received -// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients) -// 4. cleans up old web_clients that their netdata_threads have been exited - -static void web_client_multi_threaded_web_server_release_clients(void) { - struct web_client *w; - for(w = web_clients_cache.used; w ; ) { - if(unlikely(!w->running && web_client_check_dead(w))) { - struct web_client *t = w->next; - web_client_release(w); - w = t; - } - else - w = w->next; - } -} - -static void web_client_multi_threaded_web_server_stop_all_threads(void) { - struct web_client *w; - - int found = 1; - usec_t max = 2 * USEC_PER_SEC, step = 50000; - for(w = web_clients_cache.used; w ; w = w->next) { - if(w->running) { - found++; - info("stopping web client %s, id %llu", w->client_ip, w->id); - netdata_thread_cancel(w->thread); - } - } - - while(found && max > 0) { - max -= step; - info("Waiting %d web threads to finish...", found); - sleep_usec(step); - found = 0; - for(w = web_clients_cache.used; w ; w = w->next) - if(w->running) found++; - } - - if(found) - error("%d web threads are taking too long to finish. 
Giving up.", found); -} - -static struct pollfd *socket_listen_main_multi_threaded_fds = NULL; - -static void socket_listen_main_multi_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - info("releasing allocated memory..."); - freez(socket_listen_main_multi_threaded_fds); - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("stopping all running web server threads..."); - web_client_multi_threaded_web_server_stop_all_threads(); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -#define CLEANUP_EVERY_EVENTS 60 -void *socket_listen_main_multi_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr); - - web_server_mode = WEB_SERVER_MODE_MULTI_THREADED; - web_server_is_multithreaded = 1; - - struct web_client *w; - int retval, counter = 0; - - if(!api_sockets.opened) - fatal("LISTENER: No sockets to listen to."); - - socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened); - - size_t i; - for(i = 0; i < api_sockets.opened ;i++) { - socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i]; - socket_listen_main_multi_threaded_fds[i].events = POLLIN; - socket_listen_main_multi_threaded_fds[i].revents = 0; - - info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - } - - int timeout_ms = 1 * 1000; - - while(!netdata_exit) { - - // debug(D_WEB_CLIENT, "LISTENER: Waiting..."); - retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms); - - if(unlikely(retval == -1)) { - error("LISTENER: poll() failed."); - continue; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "LISTENER: poll() timeout."); - counter++; - continue; - } - - for(i = 0 ; i < api_sockets.opened ; i++) { - short int revents = socket_listen_main_multi_threaded_fds[i].revents; - - // check for new incoming connections - if(revents & POLLIN || revents & POLLPRI) { - socket_listen_main_multi_threaded_fds[i].revents = 0; - - w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd); - if(unlikely(!w)) { - // no need for error log - web_client_create_on_listenfd already logged the error - continue; - } - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port); - - w->running = 1; - if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) { - w->running = 0; - web_client_release(w); - } - } - } - - counter++; - if(counter > CLEANUP_EVERY_EVENTS) { - counter = 0; - web_client_multi_threaded_web_server_release_clients(); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - diff --git a/web/server/multi/multi-threaded.h b/web/server/multi/multi-threaded.h deleted file mode 100644 index d7ebf3c54..000000000 --- a/web/server/multi/multi-threaded.h +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_SERVER_MULTI_THREADED_H -#define NETDATA_WEB_SERVER_MULTI_THREADED_H - -#include "web/server/web_server.h" - -extern void *socket_listen_main_multi_threaded(void *ptr); 
- -#endif //NETDATA_WEB_SERVER_MULTI_THREADED_H diff --git a/web/server/single/Makefile.am b/web/server/single/Makefile.am deleted file mode 100644 index 90cc9ca1e..000000000 --- a/web/server/single/Makefile.am +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in - -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) diff --git a/web/server/single/Makefile.in b/web/server/single/Makefile.in deleted file mode 100644 index a8ae2a35b..000000000 --- a/web/server/single/Makefile.in +++ /dev/null @@ -1,647 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/server/single -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 
\ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. 
-am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ -OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = 
@abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/single/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/server/single/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. 
-# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! -f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/server/single/README.md b/web/server/single/README.md deleted file mode 100644 index df5fe56e3..000000000 --- a/web/server/single/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# `single-threaded` web server - -The `single-threaded` web server runs as a single thread inside netdata. -It uses non-blocking I/O so it can serve any number of web requests in parallel. - -This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. 
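Below is a rough, self-contained sketch of this single-threaded model using plain POSIX `select()`. It is an illustration only, not netdata's code: the names (`single_threaded_loop`, `listen_fd`) are made up for the example, and where a real server would parse HTTP and keep the socket registered for keep-alive, this sketch merely reads and drops data.

#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

// one thread serves everything: the listening socket and all connected
// clients live in a single fd_set, and select() multiplexes among them
static void single_threaded_loop(int listen_fd) {
    fd_set master, rfds;
    FD_ZERO(&master);
    FD_SET(listen_fd, &master);
    int fdmax = listen_fd;

    for(;;) {
        rfds = master;                              // select() overwrites its sets
        struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
        if(select(fdmax + 1, &rfds, NULL, NULL, &tv) <= 0)
            continue;                               // timeout or transient error

        for(int fd = 0; fd <= fdmax; fd++) {
            if(!FD_ISSET(fd, &rfds)) continue;

            if(fd == listen_fd) {                   // new client connection
                int c = accept(listen_fd, NULL, NULL);
                if(c >= 0 && c < FD_SETSIZE) {
                    FD_SET(c, &master);
                    if(c > fdmax) fdmax = c;
                }
                else if(c >= 0)
                    close(c);                       // fd_set cannot track fds beyond FD_SETSIZE
            }
            else {
                char buf[4096];
                if(read(fd, buf, sizeof(buf)) <= 0) {
                    FD_CLR(fd, &master);            // EOF or error: forget the client
                    close(fd);
                }
                // otherwise a real server would parse and answer the request here,
                // keeping the socket in the set for keep-alive
            }
        }
    }
}

The actual implementation, deleted below in `single-threaded.c`, additionally tracks separate read, write, and error sets, relinks clients after each pass, and guards every descriptor against `FD_SETSIZE`.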
\ No newline at end of file diff --git a/web/server/single/single-threaded.c b/web/server/single/single-threaded.c deleted file mode 100644 index 7e89ee683..000000000 --- a/web/server/single/single-threaded.c +++ /dev/null @@ -1,194 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#define WEB_SERVER_INTERNALS 1 -#include "single-threaded.h" - -// -------------------------------------------------------------------------------------- -// the main socket listener - SINGLE-THREADED - -struct web_client *single_threaded_clients[FD_SETSIZE]; - -static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) { - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 1; - } - - if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d)", w->id, w->ifd, w->ofd, (int)FD_SETSIZE); - return 1; - } - - FD_SET(w->ifd, efds); - if(unlikely(*max < w->ifd)) *max = w->ifd; - - if(unlikely(w->ifd != w->ofd)) { - if(*max < w->ofd) *max = w->ofd; - FD_SET(w->ofd, efds); - } - - if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds); - - single_threaded_clients[w->ifd] = w; - single_threaded_clients[w->ofd] = w; - - return 0; -} - -static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) { - FD_CLR(w->ifd, efds); - if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds); - - if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds); - - single_threaded_clients[w->ifd] = NULL; - single_threaded_clients[w->ofd] = NULL; - - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 1; - } - - return 0; -} - -static void socket_listen_main_single_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *socket_listen_main_single_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr); - web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED; - web_server_is_multithreaded = 0; - - struct web_client *w; - - if(!api_sockets.opened) - fatal("LISTENER: no listen sockets available."); - - size_t i; - for(i = 0; i < (size_t)FD_SETSIZE ; i++) - single_threaded_clients[i] = NULL; - - fd_set ifds, ofds, efds, rifds, rofds, refds; - FD_ZERO (&ifds); - FD_ZERO (&ofds); - FD_ZERO (&efds); - int fdmax = 0; - - for(i = 0; i < api_sockets.opened ; i++) { - if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE) - fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]); - - info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - - FD_SET(api_sockets.fds[i], &ifds); - FD_SET(api_sockets.fds[i], &efds); - if(fdmax < api_sockets.fds[i]) - fdmax = api_sockets.fds[i]; - } - - while(!netdata_exit) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", 
fdmax); - - struct timeval tv = { .tv_sec = 1, .tv_usec = 0 }; - rifds = ifds; - rofds = ofds; - refds = efds; - int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv); - - if(unlikely(retval == -1)) { - error("LISTENER: select() failed."); - continue; - } - else if(likely(retval)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something."); - - for(i = 0; i < api_sockets.opened ; i++) { - if (FD_ISSET(api_sockets.fds[i], &rifds)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection."); - w = web_client_create_on_listenfd(api_sockets.fds[i]); - if(unlikely(!w)) - continue; - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - if (single_threaded_link_client(w, &ifds, &ofds, &ifds, &fdmax) != 0) { - web_client_release(w); - } - } - } - - for(i = 0 ; i <= (size_t)fdmax ; i++) { - if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds))) - continue; - - w = single_threaded_clients[i]; - if(unlikely(!w)) { - // error("no client on slot %zu", i); - continue; - } - - if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) { - // error("failed to unlink client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) { - // error("no input on client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) { - if (unlikely(web_client_receive(w) < 0)) { - // error("cannot read from client %zu", i); - web_client_release(w); - continue; - } - - if (w->mode != WEB_CLIENT_MODE_FILECOPY) { - debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id); - web_client_process_request(w); - } - } - - if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) { - if (unlikely(web_client_send(w) < 0)) { - // error("cannot send data to client %zu", i); - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - web_client_release(w); - continue; - } - } - - if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) { - // error("failed to link client %zu", i); - web_client_release(w); - } - } - } - else { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout."); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - diff --git a/web/server/single/single-threaded.h b/web/server/single/single-threaded.h deleted file mode 100644 index fab4ceba1..000000000 --- a/web/server/single/single-threaded.h +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WEB_SERVER_SINGLE_THREADED_H -#define NETDATA_WEB_SERVER_SINGLE_THREADED_H - -#include "web/server/web_server.h" - -extern void *socket_listen_main_single_threaded(void *ptr); - -#endif //NETDATA_WEB_SERVER_SINGLE_THREADED_H diff --git a/web/server/static/Makefile.in b/web/server/static/Makefile.in deleted file mode 100644 index f9dda4fa2..000000000 --- a/web/server/static/Makefile.in +++ /dev/null @@ -1,647 +0,0 @@ -# Makefile.in generated by automake 1.14.1 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994-2013 Free Software Foundation, Inc. - -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# SPDX-License-Identifier: GPL-3.0-or-later - -VPATH = @srcdir@ -am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' -am__make_running_with_option = \ - case $${target_option-} in \ - ?) ;; \ - *) echo "am__make_running_with_option: internal error: invalid" \ - "target option '$${target_option-}' specified" >&2; \ - exit 1;; \ - esac; \ - has_opt=no; \ - sane_makeflags=$$MAKEFLAGS; \ - if $(am__is_gnu_make); then \ - sane_makeflags=$$MFLAGS; \ - else \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - bs=\\; \ - sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ - | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ - esac; \ - fi; \ - skip_next=no; \ - strip_trailopt () \ - { \ - flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ - }; \ - for flg in $$sane_makeflags; do \ - test $$skip_next = yes && { skip_next=no; continue; }; \ - case $$flg in \ - *=*|--*) continue;; \ - -*I) strip_trailopt 'I'; skip_next=yes;; \ - -*I?*) strip_trailopt 'I';; \ - -*O) strip_trailopt 'O'; skip_next=yes;; \ - -*O?*) strip_trailopt 'O';; \ - -*l) strip_trailopt 'l'; skip_next=yes;; \ - -*l?*) strip_trailopt 'l';; \ - -[dEDm]) skip_next=yes;; \ - -[JT]) skip_next=yes;; \ - esac; \ - case $$flg in \ - *$$target_option*) has_opt=yes; break;; \ - esac; \ - done; \ - test $$has_opt = yes -am__make_dryrun = (target_option=n; $(am__make_running_with_option)) -am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -subdir = web/server/static -DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ - $(dist_noinst_DATA) -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \ - $(top_srcdir)/build/m4/ax_c__generic.m4 \ - $(top_srcdir)/build/m4/ax_c_lto.m4 \ - $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \ - $(top_srcdir)/build/m4/ax_c_mallopt.m4 \ - $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \ - $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \ - $(top_srcdir)/build/m4/ax_pthread.m4 \ - $(top_srcdir)/build/m4/jemalloc.m4 \ - $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -CONFIG_CLEAN_VPATH_FILES = -AM_V_P = $(am__v_P_@AM_V@) -am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) -am__v_P_0 = false -am__v_P_1 = : -AM_V_GEN = $(am__v_GEN_@AM_V@) -am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) -am__v_GEN_0 = @echo " GEN " $@; -am__v_GEN_1 = -AM_V_at = $(am__v_at_@AM_V@) -am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) -am__v_at_0 = @ -am__v_at_1 = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ - ctags-recursive dvi-recursive html-recursive 
info-recursive \ - install-data-recursive install-dvi-recursive \ - install-exec-recursive install-html-recursive \ - install-info-recursive install-pdf-recursive \ - install-ps-recursive install-recursive installcheck-recursive \ - installdirs-recursive pdf-recursive ps-recursive \ - tags-recursive uninstall-recursive -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -DATA = $(dist_noinst_DATA) -RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ - distclean-recursive maintainer-clean-recursive -am__recursive_targets = \ - $(RECURSIVE_TARGETS) \ - $(RECURSIVE_CLEAN_TARGETS) \ - $(am__extra_recursive_targets) -AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - distdir -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) -# Read a list of newline-separated strings from the standard input, -# and print each of them once, without duplicates. Input order is -# *not* preserved. -am__uniquify_input = $(AWK) '\ - BEGIN { nonempty = 0; } \ - { items[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in items) print i; }; } \ -' -# Make sure the list of sources is unique. This is necessary because, -# e.g., the same source file might be shared among _SOURCES variables -# for different programs/libraries. -am__define_uniq_tagged_files = \ - list='$(am__tagged_files)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -am__relativize = \ - dir0=`pwd`; \ - sed_first='s,^\([^/]*\)/.*$$,\1,'; \ - sed_rest='s,^[^/]*/*,,'; \ - sed_last='s,^.*/\([^/]*\)$$,\1,'; \ - sed_butlast='s,/*[^/]*$$,,'; \ - while test -n "$$dir1"; do \ - first=`echo "$$dir1" | sed -e "$$sed_first"`; \ - if test "$$first" != "."; then \ - if test "$$first" = ".."; then \ - dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ - dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ - else \ - first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ - if test "$$first2" = "$$first"; then \ - dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ - else \ - dir2="../$$dir2"; \ - fi; \ - dir0="$$dir0"/"$$first"; \ - fi; \ - fi; \ - dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ - done; \ - reldir="$$dir2" -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@ -IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@ -LDFLAGS = @LDFLAGS@ -LIBCAP_CFLAGS = @LIBCAP_CFLAGS@ -LIBCAP_LIBS = @LIBCAP_LIBS@ -LIBMNL_CFLAGS = @LIBMNL_CFLAGS@ -LIBMNL_LIBS = @LIBMNL_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MATH_CFLAGS = @MATH_CFLAGS@ -MATH_LIBS = @MATH_LIBS@ -MKDIR_P = @MKDIR_P@ -NFACCT_CFLAGS = @NFACCT_CFLAGS@ -NFACCT_LIBS = @NFACCT_LIBS@ -OBJEXT = @OBJEXT@ 
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@ -OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@ -OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@ -OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@ -OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@ -OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@ -OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@ -OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@ -OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@ -OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@ -OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@ -OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@ -PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ -PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ -PTHREAD_CC = @PTHREAD_CC@ -PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ -PTHREAD_LIBS = @PTHREAD_LIBS@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SSE_CANDIDATE = @SSE_CANDIDATE@ -STRIP = @STRIP@ -UUID_CFLAGS = @UUID_CFLAGS@ -UUID_LIBS = @UUID_LIBS@ -VERSION = @VERSION@ -ZLIB_CFLAGS = @ZLIB_CFLAGS@ -ZLIB_LIBS = @ZLIB_LIBS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -ax_pthread_config = @ax_pthread_config@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_target = @build_target@ -build_vendor = @build_vendor@ -builddir = @builddir@ -cachedir = @cachedir@ -chartsdir = @chartsdir@ -configdir = @configdir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -has_jemalloc = @has_jemalloc@ -has_tcmalloc = @has_tcmalloc@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libconfigdir = @libconfigdir@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localedir = @localedir@ -localstatedir = @localstatedir@ -logdir = @logdir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -nodedir = @nodedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -pluginsdir = @pluginsdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -pythondir = @pythondir@ -registrydir = @registrydir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -varlibdir = @varlibdir@ -webdir = @webdir@ -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -SUBDIRS = \ - $(NULL) - -dist_noinst_DATA = \ - README.md \ - $(NULL) - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ - && { if test -f $@; then exit 0; else break; fi; }; \ - exit 
1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu web/server/static/Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --gnu web/server/static/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(am__aclocal_m4_deps): - -# This directory's subdirectories are mostly independent; you can cd -# into them and run 'make' without going through this Makefile. -# To change the values of 'make' variables: instead of editing Makefiles, -# (1) if the variable is set in 'config.status', edit 'config.status' -# (which will cause the Makefiles to be regenerated when you run 'make'); -# (2) otherwise, pass the desired values on the 'make' command line. -$(am__recursive_targets): - @fail=; \ - if $(am__make_keepgoing); then \ - failcom='fail=yes'; \ - else \ - failcom='exit 1'; \ - fi; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -ID: $(am__tagged_files) - $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-recursive -TAGS: tags - -tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - set x; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - $(am__define_uniq_tagged_files); \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: ctags-recursive - -CTAGS: ctags -ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) - $(am__define_uniq_tagged_files); \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-recursive - -cscopelist-am: $(am__tagged_files) - list='$(am__tagged_files)'; \ - case "$(srcdir)" in \ - [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ - *) sdir=$(subdir)/$(srcdir) ;; \ - esac; \ - for i in $$list; do \ - if test -f "$$i"; then \ - echo "$(subdir)/$$i"; \ - else \ - echo "$$sdir/$$i"; \ - fi; \ - done >> $(top_builddir)/cscope.files - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d "$(distdir)/$$file"; then \ - find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ - find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ - fi; \ - cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ - else \ - test -f "$(distdir)/$$file" \ - || cp -p $$d/$$file "$(distdir)/$$file" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - $(am__make_dryrun) \ - || test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ - $(am__relativize); \ - new_distdir=$$reldir; \ - dir1=$$subdir; dir2="$(top_distdir)"; \ - $(am__relativize); \ - new_top_distdir=$$reldir; \ - echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ - echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ - ($(am__cd) $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$new_top_distdir" \ - distdir="$$new_distdir" \ - am__remove_distdir=: \ - am__skip_length_check=: \ - am__skip_mode_fix=: \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) -clean: clean-recursive - -clean-am: clean-generic mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -html-am: - -info: info-recursive - -info-am: - -install-data-am: - -install-dvi: install-dvi-recursive - -install-dvi-am: - -install-exec-am: - -install-html: install-html-recursive - -install-html-am: - -install-info: install-info-recursive - -install-info-am: - -install-man: - -install-pdf: install-pdf-recursive - -install-pdf-am: - -install-ps: install-ps-recursive - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: - -.MAKE: $(am__recursive_targets) install-am install-strip - -.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ - check-am clean clean-generic cscopelist-am ctags ctags-am \ - distclean distclean-generic distclean-tags distdir dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-am uninstall uninstall-am - - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/web/server/static/README.md b/web/server/static/README.md index 16febdb32..653b364bc 100644 --- a/web/server/static/README.md +++ b/web/server/static/README.md @@ -6,4 +6,5 @@ The kernel distributes the incoming requests to them. Each thread uses non-blocking I/O so it can serve any number of web requests in parallel. -This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. \ No newline at end of file +This web server respects the `keep-alive` HTTP header to serve multiple HTTP requests via the same connection. 
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fweb%2Fserver%2Fstatic%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c index 56b8dbf8d..56e726ba1 100644 --- a/web/server/static/static-threaded.c +++ b/web/server/static/static-threaded.c @@ -3,10 +3,14 @@ #define WEB_SERVER_INTERNALS 1 #include "static-threaded.h" +int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS; +int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST; +long web_client_streaming_rate_t = 0L; + // ---------------------------------------------------------------------------- // high level web clients connection management -static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) { +static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port, int port_acl) { struct web_client *w; w = web_client_get_from_cache_or_allocate(); @@ -17,6 +21,7 @@ static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); + w->port_acl = port_acl; web_client_initialize_connection(w); return(w); @@ -44,6 +49,7 @@ struct web_server_static_threaded_worker { }; static long long static_threaded_workers_count = 1; + static struct web_server_static_threaded_worker *static_workers_private_data = NULL; static __thread struct web_server_static_threaded_worker *worker_private = NULL; @@ -143,7 +149,7 @@ static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data *events = POLLIN; debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd); - struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port); + struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port, pi->port_acl); w->pollinfo_slot = pi->slot; if(unlikely(pi->socktype == AF_UNIX)) @@ -200,6 +206,7 @@ static int web_server_rcv_callback(POLLINFO *pi, short int *events) { POLLINFO *fpi = poll_add_fd( pi->p , w->ifd + , pi->port_acl , 0 , POLLINFO_FLAG_CLIENT_SOCKET , "FILENAME" @@ -394,7 +401,13 @@ void *socket_listen_main_static_threaded(void *ptr) { // so, if the machine has more CPUs, avoid using resources unnecessarily int def_thread_count = (processors > 6)?6:processors; + if (!strcmp(config_get(CONFIG_SECTION_WEB, "mode", ""),"single-threaded")) { + info("Running web server with one thread, because mode is single-threaded"); + config_set(CONFIG_SECTION_WEB, "mode", "static-threaded"); + def_thread_count = 1; + } static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count); + if(static_threaded_workers_count < 1) static_threaded_workers_count = 1; size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2)); diff --git a/web/server/web_client.c b/web/server/web_client.c index 282cfcd1a..4e34ae3a3 100644 --- a/web/server/web_client.c +++ b/web/server/web_client.c @@ -157,6 +157,10 @@ void web_client_request_done(struct web_client *w) { w->origin[1] = '\0'; freez(w->user_agent); w->user_agent = NULL; + if (w->auth_bearer_token) { + freez(w->auth_bearer_token); + 
w->auth_bearer_token = NULL; + } w->mode = WEB_CLIENT_MODE_NORMAL; @@ -577,10 +581,17 @@ static inline int check_host_and_dashboard_acl_and_call(RRDHOST *host, struct we return check_host_and_call(host, w, url, func); } +static inline int check_host_and_mgmt_acl_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) { + if(!web_client_can_access_mgmt(w)) + return web_client_permission_denied(w); + + return check_host_and_call(host, w, url, func); +} + int web_client_api_request(RRDHOST *host, struct web_client *w, char *url) { // get the api version - char *tok = mystrsep(&url, "/?&"); + char *tok = mystrsep(&url, "/"); if(tok && *tok) { debug(D_WEB_CLIENT, "%llu: Searching for API version '%s'.", w->id, tok); if(strcmp(tok, "v1") == 0) @@ -713,7 +724,7 @@ const char *web_response_code_to_string(int code) { } static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) { - static uint32_t hash_origin = 0, hash_connection = 0, hash_accept_encoding = 0, hash_donottrack = 0, hash_useragent = 0; + static uint32_t hash_origin = 0, hash_connection = 0, hash_accept_encoding = 0, hash_donottrack = 0, hash_useragent = 0, hash_authorization = 0; if(unlikely(!hash_origin)) { hash_origin = simple_uhash("Origin"); @@ -721,6 +732,7 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u hash_accept_encoding = simple_uhash("Accept-Encoding"); hash_donottrack = simple_uhash("DNT"); hash_useragent = simple_uhash("User-Agent"); + hash_authorization = simple_uhash("X-Auth-Token"); } char *e = s; @@ -765,6 +777,8 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u } else if(parse_useragent && hash == hash_useragent && !strcasecmp(s, "User-Agent")) { w->user_agent = strdupz(v); + } else if(hash == hash_authorization&& !strcasecmp(s, "X-Auth-Token")) { + w->auth_bearer_token = strdupz(v); } #ifdef NETDATA_WITH_ZLIB else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) { @@ -1071,7 +1085,7 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch return 400; } - char *tok = mystrsep(&url, "/?&"); + char *tok = mystrsep(&url, "/"); if(tok && *tok) { debug(D_WEB_CLIENT, "%llu: Searching for host with name '%s'.", w->id, tok); @@ -1163,7 +1177,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch buffer_flush(w->response.data); // get the name of the data to show - tok = mystrsep(&url, "/?&"); + tok = mystrsep(&url, "&"); if(tok && *tok) { debug(D_WEB_CLIENT, "%llu: Searching for RRD data with name '%s'.", w->id, tok); @@ -1239,9 +1253,15 @@ void web_client_process_request(struct web_client *w) { return; case WEB_CLIENT_MODE_OPTIONS: - if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) { + if(unlikely( + !web_client_can_access_dashboard(w) && + !web_client_can_access_registry(w) && + !web_client_can_access_badges(w) && + !web_client_can_access_mgmt(w) && + !web_client_can_access_netdataconf(w) + )) { web_client_permission_denied(w); - return; + break; } w->response.data->contenttype = CT_TEXT_PLAIN; @@ -1252,9 +1272,15 @@ void web_client_process_request(struct web_client *w) { case WEB_CLIENT_MODE_FILECOPY: case WEB_CLIENT_MODE_NORMAL: - if(unlikely(!web_client_can_access_dashboard(w) && !web_client_can_access_registry(w) && !web_client_can_access_badges(w))) { + if(unlikely( + !web_client_can_access_dashboard(w) && + 
!web_client_can_access_registry(w) && + !web_client_can_access_badges(w) && + !web_client_can_access_mgmt(w) && + !web_client_can_access_netdataconf(w) + )) { web_client_permission_denied(w); - return; + break; } w->response.code = web_client_process_url(localhost, w, w->decoded_url); diff --git a/web/server/web_client.h b/web/server/web_client.h index b9e528fca..4263e252a 100644 --- a/web/server/web_client.h +++ b/web/server/web_client.h @@ -108,31 +108,14 @@ struct response { }; -typedef enum web_client_acl { - WEB_CLIENT_ACL_NONE = 0, - WEB_CLIENT_ACL_NOCHECK = 0, - WEB_CLIENT_ACL_DASHBOARD = 1 << 0, - WEB_CLIENT_ACL_REGISTRY = 1 << 1, - WEB_CLIENT_ACL_BADGE = 1 << 2 -} WEB_CLIENT_ACL; - -#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD) -#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY) -#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE) - -#define web_client_can_access_stream(w) \ - (!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, (w)->client_ip)) - -#define web_client_can_access_netdataconf(w) \ - (!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, (w)->client_ip)) - struct web_client { unsigned long long id; WEB_CLIENT_FLAGS flags; // status flags for the client WEB_CLIENT_MODE mode; // the operational mode of the client WEB_CLIENT_ACL acl; // the access list of the client - + int port_acl; // the operations permitted on the port the client connected to + char *auth_bearer_token; // the Bearer auth token (if sent) size_t header_parse_tries; size_t header_parse_last_size; diff --git a/web/server/web_server.c b/web/server/web_server.c index 5a68b125e..11f7edf8a 100644 --- a/web/server/web_server.c +++ b/web/server/web_server.c @@ -3,12 +3,6 @@ #define WEB_SERVER_INTERNALS 1 #include "web_server.h" -// this file includes 3 web servers: -// -// 1. single-threaded, based on select() -// 2. multi-threaded, based on poll() that spawns threads to handle the requests, based on select() -// 3. 
static-threaded, based on poll() using a fixed number of threads (configured at netdata.conf) - WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; // -------------------------------------------------------------------------------------- @@ -16,28 +10,18 @@ WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; WEB_SERVER_MODE web_server_mode_id(const char *mode) { if(!strcmp(mode, "none")) return WEB_SERVER_MODE_NONE; - else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded")) - return WEB_SERVER_MODE_SINGLE_THREADED; - else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded")) + else return WEB_SERVER_MODE_STATIC_THREADED; - else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded")) - return WEB_SERVER_MODE_MULTI_THREADED; + } const char *web_server_mode_name(WEB_SERVER_MODE id) { switch(id) { case WEB_SERVER_MODE_NONE: return "none"; - - case WEB_SERVER_MODE_SINGLE_THREADED: - return "single-threaded"; - + default: case WEB_SERVER_MODE_STATIC_THREADED: return "static-threaded"; - - default: - case WEB_SERVER_MODE_MULTI_THREADED: - return "multi-threaded"; } } @@ -45,20 +29,44 @@ const char *web_server_mode_name(WEB_SERVER_MODE id) { // API sockets LISTEN_SOCKETS api_sockets = { - .config = &netdata_config, - .config_section = CONFIG_SECTION_WEB, - .default_bind_to = "*", - .default_port = API_LISTEN_PORT, - .backlog = API_LISTEN_BACKLOG + .config = &netdata_config, + .config_section = CONFIG_SECTION_WEB, + .default_bind_to = "*", + .default_port = API_LISTEN_PORT, + .backlog = API_LISTEN_BACKLOG }; -int api_listen_sockets_setup(void) { - int socks = listen_sockets_setup(&api_sockets); +void debug_sockets() { + BUFFER *wb = buffer_create(256 * sizeof(char)); + int i; + + for(i = 0 ; i < (int)api_sockets.opened ; i++) { + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NOCHECK)?"NONE ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_DASHBOARD)?"dashboard ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_REGISTRY)?"registry ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_BADGE)?"badges ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_MGMT)?"management ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_STREAMING)?"streaming ":""); + buffer_strcat(wb, (api_sockets.fds_acl_flags[i] & WEB_CLIENT_ACL_NETDATACONF)?"netdata.conf ":""); + debug(D_WEB_CLIENT, "Socket fd %d name '%s' acl_flags: %s", + i, + api_sockets.fds_names[i], + buffer_tostring(wb)); + buffer_reset(wb); + } + buffer_free(wb); +} + +void api_listen_sockets_setup(void) { + int socks = listen_sockets_setup(&api_sockets); + + if(!socks) + fatal("LISTENER: Cannot listen on any API socket. Exiting..."); - if(!socks) - fatal("LISTENER: Cannot listen on any API socket. 
Exiting..."); + if(unlikely(debug_flags & D_WEB_CLIENT)) + debug_sockets(); - return socks; + return; } @@ -66,13 +74,14 @@ int api_listen_sockets_setup(void) { // access lists SIMPLE_PATTERN *web_allow_connections_from = NULL; -SIMPLE_PATTERN *web_allow_streaming_from = NULL; -SIMPLE_PATTERN *web_allow_netdataconf_from = NULL; // WEB_CLIENT_ACL SIMPLE_PATTERN *web_allow_dashboard_from = NULL; SIMPLE_PATTERN *web_allow_registry_from = NULL; SIMPLE_PATTERN *web_allow_badges_from = NULL; +SIMPLE_PATTERN *web_allow_mgmt_from = NULL; +SIMPLE_PATTERN *web_allow_streaming_from = NULL; +SIMPLE_PATTERN *web_allow_netdataconf_from = NULL; void web_client_update_acl_matches(struct web_client *w) { w->acl = WEB_CLIENT_ACL_NONE; @@ -85,6 +94,17 @@ void web_client_update_acl_matches(struct web_client *w) { if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip)) w->acl |= WEB_CLIENT_ACL_BADGE; + + if(!web_allow_mgmt_from || simple_pattern_matches(web_allow_mgmt_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_MGMT; + + if(!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_STREAMING; + + if(!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_NETDATACONF; + + w->acl &= w->port_acl; } @@ -119,28 +139,4 @@ void web_client_initialize_connection(struct web_client *w) { web_client_cache_verify(0); } -struct web_client *web_client_create_on_listenfd(int listener) { - struct web_client *w; - - w = web_client_get_from_cache_or_allocate(); - w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from); - - if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); - if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); - - if (w->ifd == -1) { - if(errno == EPERM) - web_server_log_connection(w, "ACCESS DENIED"); - else { - web_server_log_connection(w, "CONNECTION FAILED"); - error("%llu: Failed to accept new incoming connection.", w->id); - } - - web_client_release(w); - return NULL; - } - - web_client_initialize_connection(w); - return(w); -} diff --git a/web/server/web_server.h b/web/server/web_server.h index 7777c8978..e7c2dd448 100644 --- a/web/server/web_server.h +++ b/web/server/web_server.h @@ -15,9 +15,7 @@ #endif typedef enum web_server_mode { - WEB_SERVER_MODE_SINGLE_THREADED, WEB_SERVER_MODE_STATIC_THREADED, - WEB_SERVER_MODE_MULTI_THREADED, WEB_SERVER_MODE_NONE } WEB_SERVER_MODE; @@ -27,13 +25,14 @@ extern SIMPLE_PATTERN *web_allow_registry_from; extern SIMPLE_PATTERN *web_allow_badges_from; extern SIMPLE_PATTERN *web_allow_streaming_from; extern SIMPLE_PATTERN *web_allow_netdataconf_from; +extern SIMPLE_PATTERN *web_allow_mgmt_from; extern WEB_SERVER_MODE web_server_mode; extern WEB_SERVER_MODE web_server_mode_id(const char *mode); extern const char *web_server_mode_name(WEB_SERVER_MODE id); -extern int api_listen_sockets_setup(void); +extern void api_listen_sockets_setup(void); #define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60 #define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60 @@ -51,8 +50,6 @@ extern struct web_client *web_client_create_on_listenfd(int listener); #include "web_client_cache.h" #endif // WEB_SERVER_INTERNALS -#include "single/single-threaded.h" -#include "multi/multi-threaded.h" #include "static/static-threaded.h" #include "daemon/common.h" -- cgit v1.2.3